Commit 2f0bf925 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'xtensa-20121218' of git://github.com/czankel/xtensa-linux

Pull Xtensa patchset from Chris Zankel:
 "This contains support of device trees, many fixes, and code clean-ups"

* tag 'xtensa-20121218' of git://github.com/czankel/xtensa-linux: (33 commits)
  xtensa: don't try to build DTB when OF is disabled
  xtensa: set the correct ethernet address for xtfpga
  xtensa: clean up files to make them code-style compliant
  xtensa: provide endianness macro for sparse
  xtensa: fix RASID SR initialization
  xtensa: initialize CPENABLE SR when core has one
  xtensa: reset all timers on initialization
  Use for_each_compatible_node() macro.
  xtensa: add XTFPGA DTS
  xtensa: add support for the XTFPGA boards
  xtensa: add device trees support
  xtensa: add IRQ domains support
  xtensa: add U-Boot image support (uImage).
  xtensa: clean up boot make rules
  xtensa: fix mb and wmb definitions
  xtensa: add s32c1i-based spinlock implementations
  xtensa: add s32c1i-based bitops implementations
  xtensa: add s32c1i-based atomic ops implementations
  xtensa: add s32c1i sanity check
  xtensa: add trap_set_handler function
  ...
parents 1bd12c91 055d4db1
We Have Atomic Operation Control (ATOMCTL) Register.
This register determines the effect of using a S32C1I instruction
with various combinations of:
1. With and without a Coherent Cache Controller which
can do Atomic Transactions to the memory internally.
2. With and without An Intelligent Memory Controller which
can do Atomic Transactions itself.
The Core comes up with a default value for the three types of cache ops:
0x28: (WB: Internal, WT: Internal, BY:Exception)
On the FPGA Cards we typically simulate an Intelligent Memory controller
which can implement RCW transactions. For FPGA cards with an External
Memory controller we let it do the atomic operations internally while
doing a Cached (WB) transaction and use the Memory RCW for un-cached
operations.
For systems without a coherent cache controller, non-MX, we always
use the memory controller's RCW, though non-MX controllers likely
support the Internal Operation.
CUSTOMER-WARNING:
Virtually all customers buy their memory controllers from vendors that
don't support atomic RCW memory transactions and will likely want to
configure this register to not use RCW.
Developers might find using RCW in Bypass mode convenient when testing
with the cache being bypassed; for example studying cache alias problems.
See Section 4.3.12.4 of ISA; Bits:
WB WT BY
5 4 | 3 2 | 1 0
2 Bit
Field
Values WB - Write Back WT - Write Thru BY - Bypass
--------- --------------- ----------------- ----------------
0 Exception Exception Exception
1 RCW Transaction RCW Transaction RCW Transaction
2 Internal Operation Exception Reserved
3 Reserved Reserved Reserved
...@@ -17,6 +17,7 @@ config XTENSA ...@@ -17,6 +17,7 @@ config XTENSA
select GENERIC_KERNEL_EXECVE select GENERIC_KERNEL_EXECVE
select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_OPTIONAL_GPIOLIB
select CLONE_BACKWARDS select CLONE_BACKWARDS
select IRQ_DOMAIN
help help
Xtensa processors are 32-bit RISC machines designed by Tensilica Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both primarily for embedded systems. These processors are both
...@@ -150,6 +151,15 @@ config XTENSA_PLATFORM_S6105 ...@@ -150,6 +151,15 @@ config XTENSA_PLATFORM_S6105
select SERIAL_CONSOLE select SERIAL_CONSOLE
select NO_IOPORT select NO_IOPORT
config XTENSA_PLATFORM_XTFPGA
bool "XTFPGA"
select SERIAL_CONSOLE
select ETHOC
select XTENSA_CALIBRATE_CCOUNT
help
XTFPGA is the name of the Tensilica board family (LX60, LX110, LX200, ML605).
This hardware is capable of running a full Linux distribution.
endchoice endchoice
...@@ -177,6 +187,17 @@ config CMDLINE ...@@ -177,6 +187,17 @@ config CMDLINE
time by entering them here. As a minimum, you should specify the time by entering them here. As a minimum, you should specify the
memory size and the root device (e.g., mem=64M root=/dev/nfs). memory size and the root device (e.g., mem=64M root=/dev/nfs).
config USE_OF
bool "Flattened Device Tree support"
select OF
select OF_EARLY_FLATTREE
help
Include support for flattened device tree machine descriptions.
config BUILTIN_DTB
string "DTB to build into the kernel image"
depends on OF
source "mm/Kconfig" source "mm/Kconfig"
source "drivers/pcmcia/Kconfig" source "drivers/pcmcia/Kconfig"
......
...@@ -2,6 +2,26 @@ menu "Kernel hacking" ...@@ -2,6 +2,26 @@ menu "Kernel hacking"
source "lib/Kconfig.debug" source "lib/Kconfig.debug"
endmenu config LD_NO_RELAX
bool "Disable linker relaxation"
default n
help
Enable this option to disable link-time optimizations.
The default linker behavior is to combine identical literal
values to reduce code size and remove unnecessary overhead from
assembler-generated 'longcall' sequences.
Enabling this option improves the link time but increases the
code size, and possibly execution time.
config S32C1I_SELFTEST
bool "Perform S32C1I instruction self-test at boot"
default y
help
Enable this option to test S32C1I instruction behavior at boot.
Correct operation of this instruction requires some cooperation from hardware
external to the processor (such as bus bridge, bus fabric, or memory controller).
It is easy to get the hardware configuration wrong; this test should catch such problems early.
Say 'N' on stable hardware.
endmenu
...@@ -38,6 +38,7 @@ endif ...@@ -38,6 +38,7 @@ endif
platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000 platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105 platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105
platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga
PLATFORM = $(platform-y) PLATFORM = $(platform-y)
export PLATFORM export PLATFORM
...@@ -49,6 +50,17 @@ KBUILD_CFLAGS += -pipe -mlongcalls ...@@ -49,6 +50,17 @@ KBUILD_CFLAGS += -pipe -mlongcalls
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
ifneq ($(CONFIG_LD_NO_RELAX),)
LDFLAGS := --no-relax
endif
ifeq ($(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
CHECKFLAGS += -D__XTENSA_EB__
endif
ifeq ($(shell echo -e __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
CHECKFLAGS += -D__XTENSA_EL__
endif
vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y)) vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y)) plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
...@@ -75,6 +87,10 @@ core-y += $(buildvar) $(buildplf) ...@@ -75,6 +87,10 @@ core-y += $(buildvar) $(buildplf)
libs-y += arch/xtensa/lib/ $(LIBGCC) libs-y += arch/xtensa/lib/ $(LIBGCC)
ifneq ($(CONFIG_BUILTIN_DTB),"")
core-$(CONFIG_OF) += arch/xtensa/boot/
endif
boot := arch/xtensa/boot boot := arch/xtensa/boot
all: zImage all: zImage
...@@ -84,7 +100,9 @@ bzImage : zImage ...@@ -84,7 +100,9 @@ bzImage : zImage
zImage: vmlinux zImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@ $(Q)$(MAKE) $(build)=$(boot) $@
%.dtb:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
define archhelp define archhelp
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
endef endef
...@@ -22,12 +22,35 @@ subdir-y := lib ...@@ -22,12 +22,35 @@ subdir-y := lib
# Subdirs for the boot loader(s) # Subdirs for the boot loader(s)
bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot
bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot
BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
ifneq ($(CONFIG_BUILTIN_DTB),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
endif
# Rule to build device tree blobs
$(obj)/%.dtb: $(src)/dts/%.dts FORCE
$(call if_changed_dep,dtc)
clean-files := *.dtb.S
zImage Image: $(bootdir-y) zImage Image: $(bootdir-y)
$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \ $(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
$(addprefix $(obj)/,$(host-progs)) $(addprefix $(obj)/,$(host-progs))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
vmlinux.bin.gz: vmlinux.bin FORCE
$(call if_changed,gzip)
boot-elf: vmlinux.bin
boot-redboot: vmlinux.bin.gz
boot-uboot: vmlinux.bin.gz
...@@ -4,9 +4,6 @@ ...@@ -4,9 +4,6 @@
# for more details. # for more details.
# #
GZIP = gzip
GZIP_FLAGS = -v9fc
ifeq ($(BIG_ENDIAN),1) ifeq ($(BIG_ENDIAN),1)
OBJCOPY_ARGS := -O elf32-xtensa-be OBJCOPY_ARGS := -O elf32-xtensa-be
else else
...@@ -20,18 +17,17 @@ boot-y := bootstrap.o ...@@ -20,18 +17,17 @@ boot-y := bootstrap.o
OBJS := $(addprefix $(obj)/,$(boot-y)) OBJS := $(addprefix $(obj)/,$(boot-y))
vmlinux.tmp: vmlinux $(obj)/Image.o: vmlinux.bin $(OBJS)
$(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
$^ $@ --add-section image=vmlinux.bin \
Image: vmlinux.tmp $(OBJS) arch/$(ARCH)/boot/boot-elf/boot.lds
$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
--add-section image=vmlinux.tmp \
--set-section-flags image=contents,alloc,load,load,data \ --set-section-flags image=contents,alloc,load,load,data \
$(OBJS) $@.tmp $(OBJS) $@
$(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
-T arch/$(ARCH)/boot/boot-elf/boot.lds \
-o arch/$(ARCH)/boot/$@.elf $@.tmp
zImage: Image $(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
$(Q)$(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
-T $(obj)/boot.lds \
--build-id=none \
-o $@ $(obj)/Image.o
$(Q)$(kecho) ' Kernel: $@ is ready'
zImage: $(obj)/../Image.elf
...@@ -4,8 +4,6 @@ ...@@ -4,8 +4,6 @@
# for more details. # for more details.
# #
GZIP = gzip
GZIP_FLAGS = -v9fc
ifeq ($(BIG_ENDIAN),1) ifeq ($(BIG_ENDIAN),1)
OBJCOPY_ARGS := -O elf32-xtensa-be OBJCOPY_ARGS := -O elf32-xtensa-be
else else
...@@ -21,17 +19,17 @@ LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a ...@@ -21,17 +19,17 @@ LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
vmlinux.tmp: vmlinux $(obj)/zImage.o: vmlinux.bin.gz $(OBJS)
$(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
$^ $@ --add-section image=vmlinux.bin.gz \
--set-section-flags image=contents,alloc,load,load,data \
$(OBJS) $@
vmlinux.tmp.gz: vmlinux.tmp $(obj)/zImage.elf: $(obj)/zImage.o $(LIBS)
$(GZIP) $(GZIP_FLAGS) $^ > $@ $(Q)$(LD) $(LD_ARGS) -o $@ $^ -L/xtensa-elf/lib $(LIBGCC)
zImage: vmlinux.tmp.gz $(OBJS) $(LIBS) $(obj)/../zImage.redboot: $(obj)/zImage.elf
$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \ $(Q)$(OBJCOPY) -S -O binary $< $@
--add-section image=vmlinux.tmp.gz \ $(Q)$(kecho) ' Kernel: $@ is ready'
--set-section-flags image=contents,alloc,load,load,data \
$(OBJS) $@.tmp zImage: $(obj)/../zImage.redboot
$(LD) $(LD_ARGS) -o $@.elf $@.tmp $(LIBS) -L/xtensa-elf/lib $(LIBGCC)
$(OBJCOPY) -S -O binary $@.elf arch/$(ARCH)/boot/$@.redboot
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
UIMAGE_LOADADDR = 0xd0001000
UIMAGE_COMPRESSION = gzip
$(obj)/../uImage: vmlinux.bin.gz FORCE
$(call if_changed,uimage)
$(Q)$(kecho) ' Kernel: $@ is ready'
zImage: $(obj)/../uImage
/dts-v1/;
/include/ "xtfpga.dtsi"
/include/ "xtfpga-flash-4m.dtsi"
/ {
compatible = "xtensa,lx60";
memory@0 {
device_type = "memory";
reg = <0x00000000 0x04000000>;
};
};
/dts-v1/;
/include/ "xtfpga.dtsi"
/include/ "xtfpga-flash-16m.dtsi"
/ {
compatible = "xtensa,ml605";
memory@0 {
device_type = "memory";
reg = <0x00000000 0x08000000>;
};
};
/ {
flash: flash@f8000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0xf8000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
};
partition@0x400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
partition@0xa00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
partition@0xfe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};
};
};
/ {
flash: flash@f8000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0xf8000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
};
partition@0x3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};
};
};
/ {
compatible = "xtensa,xtfpga";
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&pic>;
chosen {
bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug";
};
memory@0 {
device_type = "memory";
reg = <0x00000000 0x06000000>;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
compatible = "xtensa,cpu";
reg = <0>;
/* Filled in by platform_setup from FPGA register
* clock-frequency = <100000000>;
*/
};
};
pic: pic {
compatible = "xtensa,pic";
/* one cell: internal irq number,
* two cells: second cell == 0: internal irq number
* second cell == 1: external irq number
*/
#interrupt-cells = <2>;
interrupt-controller;
};
serial0: serial@fd050020 {
device_type = "serial";
compatible = "ns16550a";
no-loopback-test;
reg = <0xfd050020 0x20>;
reg-shift = <2>;
interrupts = <0 1>; /* external irq 0 */
/* Filled in by platform_setup from FPGA register
* clock-frequency = <100000000>;
*/
};
enet0: ethoc@fd030000 {
compatible = "opencores,ethoc";
reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
interrupts = <1 1>; /* external irq 1 */
local-mac-address = [00 50 c2 13 6f 00];
};
};
...@@ -66,19 +66,35 @@ ...@@ -66,19 +66,35 @@
*/ */
static inline void atomic_add(int i, atomic_t * v) static inline void atomic_add(int i, atomic_t * v)
{ {
unsigned int vval; #if XCHAL_HAVE_S32C1I
unsigned long tmp;
__asm__ __volatile__( int result;
"rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t" __asm__ __volatile__(
"add %0, %0, %1 \n\t" "1: l32i %1, %3, 0\n"
"s32i %0, %2, 0 \n\t" " wsr %1, scompare1\n"
"wsr a15, ps \n\t" " add %0, %1, %2\n"
"rsync \n" " s32c1i %0, %3, 0\n"
: "=&a" (vval) " bne %0, %1, 1b\n"
: "a" (i), "a" (v) : "=&a" (result), "=&a" (tmp)
: "a15", "memory" : "a" (i), "a" (v)
); : "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" add %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
#endif
} }
/** /**
...@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v) ...@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v)
*/ */
static inline void atomic_sub(int i, atomic_t *v) static inline void atomic_sub(int i, atomic_t *v)
{ {
unsigned int vval; #if XCHAL_HAVE_S32C1I
unsigned long tmp;
__asm__ __volatile__( int result;
"rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t" __asm__ __volatile__(
"sub %0, %0, %1 \n\t" "1: l32i %1, %3, 0\n"
"s32i %0, %2, 0 \n\t" " wsr %1, scompare1\n"
"wsr a15, ps \n\t" " sub %0, %1, %2\n"
"rsync \n" " s32c1i %0, %3, 0\n"
: "=&a" (vval) " bne %0, %1, 1b\n"
: "a" (i), "a" (v) : "=&a" (result), "=&a" (tmp)
: "a15", "memory" : "a" (i), "a" (v)
); : "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" sub %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
#endif
} }
/* /*
...@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t * v) static inline int atomic_add_return(int i, atomic_t * v)
{ {
unsigned int vval; #if XCHAL_HAVE_S32C1I
unsigned long tmp;
__asm__ __volatile__( int result;
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t" __asm__ __volatile__(
"add %0, %0, %1 \n\t" "1: l32i %1, %3, 0\n"
"s32i %0, %2, 0 \n\t" " wsr %1, scompare1\n"
"wsr a15, ps \n\t" " add %0, %1, %2\n"
"rsync \n" " s32c1i %0, %3, 0\n"
: "=&a" (vval) " bne %0, %1, 1b\n"
: "a" (i), "a" (v) " add %0, %0, %2\n"
: "a15", "memory" : "=&a" (result), "=&a" (tmp)
); : "a" (i), "a" (v)
: "memory"
return vval; );
return result;
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" add %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
#endif
} }
static inline int atomic_sub_return(int i, atomic_t * v) static inline int atomic_sub_return(int i, atomic_t * v)
{ {
unsigned int vval; #if XCHAL_HAVE_S32C1I
unsigned long tmp;
__asm__ __volatile__( int result;
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t" __asm__ __volatile__(
"sub %0, %0, %1 \n\t" "1: l32i %1, %3, 0\n"
"s32i %0, %2, 0 \n\t" " wsr %1, scompare1\n"
"wsr a15, ps \n\t" " sub %0, %1, %2\n"
"rsync \n" " s32c1i %0, %3, 0\n"
: "=&a" (vval) " bne %0, %1, 1b\n"
: "a" (i), "a" (v) " sub %0, %0, %2\n"
: "a15", "memory" : "=&a" (result), "=&a" (tmp)
); : "a" (i), "a" (v)
: "memory"
return vval; );
return result;
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" sub %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
#endif
} }
/** /**
...@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
unsigned int all_f = -1; #if XCHAL_HAVE_S32C1I
unsigned int vval; unsigned long tmp;
int result;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t" __asm__ __volatile__(
"l32i %0, %2, 0 \n\t" "1: l32i %1, %3, 0\n"
"xor %1, %4, %3 \n\t" " wsr %1, scompare1\n"
"and %0, %0, %4 \n\t" " and %0, %1, %2\n"
"s32i %0, %2, 0 \n\t" " s32c1i %0, %3, 0\n"
"wsr a15, ps \n\t" " bne %0, %1, 1b\n"
"rsync \n" : "=&a" (result), "=&a" (tmp)
: "=&a" (vval), "=a" (mask) : "a" (~mask), "a" (v)
: "a" (v), "a" (all_f), "1" (mask) : "memory"
: "a15", "memory" );
); #else
unsigned int all_f = -1;
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" xor %1, %4, %3\n"
" and %0, %0, %4\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask)
: "a15", "memory"
);
#endif
} }
static inline void atomic_set_mask(unsigned int mask, atomic_t *v) static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{ {
unsigned int vval; #if XCHAL_HAVE_S32C1I
unsigned long tmp;
__asm__ __volatile__( int result;
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t" __asm__ __volatile__(
"or %0, %0, %1 \n\t" "1: l32i %1, %3, 0\n"
"s32i %0, %2, 0 \n\t" " wsr %1, scompare1\n"
"wsr a15, ps \n\t" " or %0, %1, %2\n"
"rsync \n" " s32c1i %0, %3, 0\n"
: "=&a" (vval) " bne %0, %1, 1b\n"
: "a" (mask), "a" (v) : "=&a" (result), "=&a" (tmp)
: "a15", "memory" : "a" (mask), "a" (v)
); : "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" or %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (mask), "a" (v)
: "a15", "memory"
);
#endif
} }
/* Atomic operations are already serializing */ /* Atomic operations are already serializing */
...@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) ...@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */ #endif /* _XTENSA_ATOMIC_H */
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2012 Tensilica Inc.
*/ */
#ifndef _XTENSA_SYSTEM_H #ifndef _XTENSA_SYSTEM_H
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
#define smp_read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0)
#define mb() barrier() #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() mb() #define rmb() barrier()
#define wmb() mb() #define wmb() mb()
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -29,7 +29,6 @@ ...@@ -29,7 +29,6 @@
#define smp_mb__before_clear_bit() barrier() #define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier()
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/non-atomic.h>
#if XCHAL_HAVE_NSA #if XCHAL_HAVE_NSA
...@@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word) ...@@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word)
#endif #endif
#include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/fls64.h>
#if XCHAL_HAVE_S32C1I
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
}
static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
#else
#include <asm-generic/bitops/atomic.h>
#endif /* XCHAL_HAVE_S32C1I */
#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/le.h>
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */ #define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */ #define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */
#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */ #define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
#define BP_TAG_FDT 0x1006 /* flat device tree addr */
#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */ #define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
#define BP_TAG_LAST 0x7E0B /* last tag */ #define BP_TAG_LAST 0x7E0B /* last tag */
...@@ -31,15 +32,15 @@ ...@@ -31,15 +32,15 @@
/* All records are aligned to 4 bytes */ /* All records are aligned to 4 bytes */
typedef struct bp_tag { typedef struct bp_tag {
unsigned short id; /* tag id */ unsigned short id; /* tag id */
unsigned short size; /* size of this record excluding the structure*/ unsigned short size; /* size of this record excluding the structure*/
unsigned long data[0]; /* data */ unsigned long data[0]; /* data */
} bp_tag_t; } bp_tag_t;
typedef struct meminfo { typedef struct meminfo {
unsigned long type; unsigned long type;
unsigned long start; unsigned long start;
unsigned long end; unsigned long end;
} meminfo_t; } meminfo_t;
#define SYSMEM_BANKS_MAX 5 #define SYSMEM_BANKS_MAX 5
...@@ -48,14 +49,11 @@ typedef struct meminfo { ...@@ -48,14 +49,11 @@ typedef struct meminfo {
#define MEMORY_TYPE_NONE 0x2000 #define MEMORY_TYPE_NONE 0x2000
typedef struct sysmem_info { typedef struct sysmem_info {
int nr_banks; int nr_banks;
meminfo_t bank[SYSMEM_BANKS_MAX]; meminfo_t bank[SYSMEM_BANKS_MAX];
} sysmem_info_t; } sysmem_info_t;
extern sysmem_info_t sysmem; extern sysmem_info_t sysmem;
#endif #endif
#endif #endif
...@@ -174,4 +174,3 @@ ...@@ -174,4 +174,3 @@
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
.endm .endm
...@@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, ...@@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*); extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); extern void flush_cache_page(struct vm_area_struct*,
unsigned long, unsigned long);
#else #else
......
...@@ -36,8 +36,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); ...@@ -36,8 +36,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary * better 64-bit) boundary
*/ */
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum, asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int *src_err_ptr, int *dst_err_ptr); int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
/* /*
* Note: when you get a NULL pointer exception here this means someone * Note: when you get a NULL pointer exception here this means someone
...@@ -54,7 +55,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, ...@@ -54,7 +55,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
static inline static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr) int len, __wsum sum, int *err_ptr)
{ {
return csum_partial_copy_generic((__force const void *)src, dst, return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL); len, sum, err_ptr, NULL);
...@@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) ...@@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
/* Since the input registers which are loaded with iph and ihl /* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */ will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr) : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
"=&r" (endaddr)
: "1" (iph), "2" (ihl) : "1" (iph), "2" (ihl)
: "memory"); : "memory");
...@@ -168,7 +170,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, ...@@ -168,7 +170,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
static __inline__ __sum16 ip_compute_csum(const void *buff, int len) static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{ {
return csum_fold (csum_partial(buff, len, 0)); return csum_fold (csum_partial(buff, len, 0));
} }
#define _HAVE_ARCH_IPV6_CSUM #define _HAVE_ARCH_IPV6_CSUM
...@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, ...@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
* Copy and checksum to user * Copy and checksum to user
*/ */
#define HAVE_CSUM_COPY_USER #define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst, static __inline__ __wsum csum_and_copy_to_user(const void *src,
int len, __wsum sum, int *err_ptr) void __user *dst, int len,
__wsum sum, int *err_ptr)
{ {
if (access_ok(VERIFY_WRITE, dst, len)) if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
if (len) if (len)
*err_ptr = -EFAULT; *err_ptr = -EFAULT;
......
...@@ -22,17 +22,30 @@ ...@@ -22,17 +22,30 @@
static inline unsigned long static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new) __cmpxchg_u32(volatile int *p, int old, int new)
{ {
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" #if XCHAL_HAVE_S32C1I
"l32i %0, %1, 0 \n\t" __asm__ __volatile__(
"bne %0, %2, 1f \n\t" " wsr %2, scompare1\n"
"s32i %3, %1, 0 \n\t" " s32c1i %0, %1, 0\n"
"1: \n\t" : "+a" (new)
"wsr a15, ps \n\t" : "a" (p), "a" (old)
"rsync \n\t" : "memory"
: "=&a" (old) );
: "a" (p), "a" (old), "r" (new)
: "a15", "memory"); return new;
return old; #else
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
: "a15", "memory");
return old;
#endif
} }
/* This function doesn't exist, so you'll get a linker error /* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */ * if something tries to do an invalid cmpxchg(). */
...@@ -93,19 +106,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ...@@ -93,19 +106,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline unsigned long xchg_u32(volatile int * m, unsigned long val) static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{ {
unsigned long tmp; #if XCHAL_HAVE_S32C1I
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" unsigned long tmp, result;
"l32i %0, %1, 0 \n\t" __asm__ __volatile__(
"s32i %2, %1, 0 \n\t" "1: l32i %1, %2, 0\n"
"wsr a15, ps \n\t" " mov %0, %3\n"
"rsync \n\t" " wsr %1, scompare1\n"
: "=&a" (tmp) " s32c1i %0, %2, 0\n"
: "a" (m), "a" (val) " bne %0, %1, 1b\n"
: "a15", "memory"); : "=&a" (result), "=&a" (tmp)
return tmp; : "a" (m), "a" (val)
: "memory"
);
return result;
#else
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (tmp)
: "a" (m), "a" (val)
: "a15", "memory");
return tmp;
#endif
} }
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) #define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/* /*
* This only works if the compiler isn't horribly bad at optimizing. * This only works if the compiler isn't horribly bad at optimizing.
......
...@@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void) ...@@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void)
#define GET_CURRENT(reg,sp) \ #define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \ GET_THREAD_INFO(reg,sp); \
l32i reg, reg, TI_TASK \ l32i reg, reg, TI_TASK \
#endif #endif
......
...@@ -19,9 +19,9 @@ extern unsigned long loops_per_jiffy; ...@@ -19,9 +19,9 @@ extern unsigned long loops_per_jiffy;
static inline void __delay(unsigned long loops) static inline void __delay(unsigned long loops)
{ {
/* 2 cycles per loop. */ /* 2 cycles per loop. */
__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
: "=r" (loops) : "0" (loops)); : "=r" (loops) : "0" (loops));
} }
static __inline__ u32 xtensa_get_ccount(void) static __inline__ u32 xtensa_get_ccount(void)
...@@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs) ...@@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs)
} }
#endif #endif
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
/* /*
* DMA-consistent mapping functions. * DMA-consistent mapping functions.
*/ */
...@@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, ...@@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
} }
static inline void static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
enum dma_data_direction direction) size_t size, enum dma_data_direction direction)
{ {
consistent_sync((void *)bus_to_virt(dma_handle), size, direction); consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
} }
......
...@@ -168,11 +168,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); ...@@ -168,11 +168,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
*/ */
#define ELF_PLAT_INIT(_r, load_addr) \ #define ELF_PLAT_INIT(_r, load_addr) \
do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \ do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
_r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \ _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
_r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \ _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
} while (0) } while (0)
typedef struct { typedef struct {
xtregs_opt_t opt; xtregs_opt_t opt;
......
...@@ -14,4 +14,3 @@ ...@@ -14,4 +14,3 @@
extern void flush_cache_kmaps(void); extern void flush_cache_kmaps(void);
#endif #endif
/*
* arch/xtensa/include/asm/initialize_mmu.h
*
* Initializes MMU:
*
* For the new V3 MMU we remap the TLB from virtual == physical
* to the standard Linux mapping used in earlier MMU's.
*
* The the MMU we also support a new configuration register that
* specifies how the S32C1I instruction operates with the cache
* controller.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2008 - 2012 Tensilica, Inc.
*
* Marc Gauthier <marc@tensilica.com>
* Pete Delaney <piet@tensilica.com>
*/
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
#ifdef __ASSEMBLY__
#define XTENSA_HWVERSION_RC_2009_0 230000
.macro initialize_mmu
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
* We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
* For details see Documentation/xtensa/atomctl.txt
*/
#if XCHAL_DCACHE_IS_COHERENT
movi a3, 0x25 /* For SMP/MX -- internal for writeback,
* RCW otherwise
*/
#else
movi a3, 0x29 /* non-MX -- Most cores use Std Memory
* Controlers which usually can't use RCW
*/
#endif
wsr a3, atomctl
#endif /* XCHAL_HAVE_S32C1I &&
* (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
*/
.endm
#endif /*__ASSEMBLY__*/
#endif /* _XTENSA_INITIALIZE_MMU_H */
...@@ -107,7 +107,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) ...@@ -107,7 +107,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk) struct task_struct *tsk)
{ {
unsigned long asid = asid_cache; unsigned long asid = asid_cache;
......
...@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) ...@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{ {
} }
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
{ {
return 0; return 0;
} }
......
...@@ -29,19 +29,19 @@ ...@@ -29,19 +29,19 @@
* PAGE_SHIFT determines the page size * PAGE_SHIFT determines the page size
*/ */
#define PAGE_SHIFT 12 #define PAGE_SHIFT 12
#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) #define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1)) #define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN XCHAL_KSEG_SIZE #define MAX_MEM_PFN XCHAL_KSEG_SIZE
#else #else
#define PAGE_OFFSET 0 #define PAGE_OFFSET 0
#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) #define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif #endif
#define PGTABLE_START 0x80000000 #define PGTABLE_START 0x80000000
/* /*
* Cache aliasing: * Cache aliasing:
...@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*); ...@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) #define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
#ifdef CONFIG_DISCONTIGMEM #ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported # error CONFIG_DISCONTIGMEM not supported
#endif #endif
......
...@@ -35,7 +35,7 @@ struct pci_space { ...@@ -35,7 +35,7 @@ struct pci_space {
struct pci_controller { struct pci_controller {
int index; /* used for pci_controller_num */ int index; /* used for pci_controller_num */
struct pci_controller *next; struct pci_controller *next;
struct pci_bus *bus; struct pci_bus *bus;
void *arch_data; void *arch_data;
int first_busno; int first_busno;
......
...@@ -53,7 +53,7 @@ struct pci_dev; ...@@ -53,7 +53,7 @@ struct pci_dev;
/* Map a range of PCI memory or I/O space for a device into user space */ /* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine); enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1 #define HAVE_PCI_MMAP 1
......
...@@ -42,7 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) ...@@ -42,7 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
extern struct kmem_cache *pgtable_cache; extern struct kmem_cache *pgtable_cache;
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
......
...@@ -284,7 +284,7 @@ struct vm_area_struct; ...@@ -284,7 +284,7 @@ struct vm_area_struct;
static inline int static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep) pte_t *ptep)
{ {
pte_t pte = *ptep; pte_t pte = *ptep;
if (!pte_young(pte)) if (!pte_young(pte))
...@@ -304,8 +304,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) ...@@ -304,8 +304,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
static inline void static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{ {
pte_t pte = *ptep; pte_t pte = *ptep;
update_pte(ptep, pte_wrprotect(pte)); update_pte(ptep, pte_wrprotect(pte));
} }
/* to find an entry in a kernel page-table-directory */ /* to find an entry in a kernel page-table-directory */
...@@ -399,7 +399,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma, ...@@ -399,7 +399,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
*/ */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \ #define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot) remap_pfn_range(vma, from, pfn, size, prot)
typedef pte_t *pte_addr_t; typedef pte_t *pte_addr_t;
......
...@@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void); ...@@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void);
extern void platform_calibrate_ccount (void); extern void platform_calibrate_ccount (void);
#endif /* _XTENSA_PLATFORM_H */ #endif /* _XTENSA_PLATFORM_H */
...@@ -89,7 +89,7 @@ ...@@ -89,7 +89,7 @@
#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000)) #define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
typedef struct { typedef struct {
unsigned long seg; unsigned long seg;
} mm_segment_t; } mm_segment_t;
struct thread_struct { struct thread_struct {
...@@ -145,10 +145,10 @@ struct thread_struct { ...@@ -145,10 +145,10 @@ struct thread_struct {
* set_thread_state in signal.c depends on it. * set_thread_state in signal.c depends on it.
*/ */
#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \ #define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
(1 << PS_CALLINC_SHIFT) | \ (1 << PS_CALLINC_SHIFT) | \
(USER_RING << PS_RING_SHIFT) | \ (USER_RING << PS_RING_SHIFT) | \
(1 << PS_UM_BIT) | \ (1 << PS_UM_BIT) | \
(1 << PS_EXCM_BIT)) (1 << PS_EXCM_BIT))
/* Clearing a0 terminates the backtrace. */ /* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \ #define start_thread(regs, new_pc, new_sp) \
......
#ifndef _XTENSA_ASM_PROM_H
#define _XTENSA_ASM_PROM_H
#define HAVE_ARCH_DEVTREE_FIXUPS
#endif /* _XTENSA_ASM_PROM_H */
...@@ -37,7 +37,7 @@ struct pt_regs { ...@@ -37,7 +37,7 @@ struct pt_regs {
unsigned long windowstart; /* 52 */ unsigned long windowstart; /* 52 */
unsigned long syscall; /* 56 */ unsigned long syscall; /* 56 */
unsigned long icountlevel; /* 60 */ unsigned long icountlevel; /* 60 */
int reserved[1]; /* 64 */ unsigned long scompare1; /* 64 */
/* Additional configurable registers that are used by the compiler. */ /* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt; xtregs_opt_t xtregs_opt;
...@@ -55,7 +55,7 @@ struct pt_regs { ...@@ -55,7 +55,7 @@ struct pt_regs {
# define arch_has_single_step() (1) # define arch_has_single_step() (1)
# define task_pt_regs(tsk) ((struct pt_regs*) \ # define task_pt_regs(tsk) ((struct pt_regs*) \
(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0) # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc) # define instruction_pointer(regs) ((regs)->pc)
......
...@@ -52,6 +52,10 @@ ...@@ -52,6 +52,10 @@
#define EXCCAUSE_SPECULATION 7 #define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8 #define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9 #define EXCCAUSE_UNALIGNED 9
#define EXCCAUSE_INSTR_DATA_ERROR 12
#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
#define EXCCAUSE_INSTR_ADDR_ERROR 14
#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
#define EXCCAUSE_ITLB_MISS 16 #define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17 #define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18 #define EXCCAUSE_ITLB_PRIVILEGE 18
...@@ -105,4 +109,3 @@ ...@@ -105,4 +109,3 @@
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */ #define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_SPECREG_H */ #endif /* _XTENSA_SPECREG_H */
...@@ -11,6 +11,192 @@ ...@@ -11,6 +11,192 @@
#ifndef _XTENSA_SPINLOCK_H #ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H #define _XTENSA_SPINLOCK_H
#include <linux/spinlock.h> /*
* spinlock
*
* There is at most one owner of a spinlock. There are not different
* types of spinlock owners like there are for rwlocks (see below).
*
* When trying to obtain a spinlock, the function "spins" forever, or busy-
* waits, until the lock is obtained. When spinning, presumably some other
* owner will soon give up the spinlock making it available to others. Use
* the trylock functions to avoid spinning forever.
*
* possible values:
*
* 0 nobody owns the spinlock
* 1 somebody owns the spinlock
*/
#define __raw_spin_is_locked(x) ((x)->slock != 0)
#define __raw_spin_unlock_wait(lock) \
do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
"1: movi %0, 1\n"
" s32c1i %0, %1, 0\n"
" bnez %0, 1b\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
" movi %0, 1\n"
" s32c1i %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
return tmp == 0 ? 1 : 0;
}
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" s32ri %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
}
/*
* rwlock
*
* Read-write locks are really a more flexible spinlock. They allow
* multiple readers but only one writer. Write ownership is exclusive
* (i.e., all other readers and writers are blocked from ownership while
* there is a write owner). These rwlocks are unfair to writers. Writers
* can be starved for an indefinite time by readers.
*
* possible values:
*
* 0 nobody owns the rwlock
* >0 one or more readers own the rwlock
* (the positive value is the actual number of readers)
* 0x80000000 one writer owns the rwlock, no other writers, no readers
*/
#define __raw_write_can_lock(x) ((x)->lock == 0)
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
"1: movi %0, 1\n"
" slli %0, %0, 31\n"
" s32c1i %0, %1, 0\n"
" bnez %0, 1b\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
" movi %0, 1\n"
" slli %0, %0, 31\n"
" s32c1i %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
return tmp == 0 ? 1 : 0;
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" s32ri %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long tmp;
unsigned long result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" bltz %1, 1b\n"
" wsr %1, scompare1\n"
" addi %0, %1, 1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
unsigned long result;
unsigned long tmp;
__asm__ __volatile__(
" l32i %1, %2, 0\n"
" addi %0, %1, 1\n"
" bltz %0, 1f\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" sub %0, %0, %1\n"
"1:\n"
: "=&a" (result), "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
return result == 0;
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" addi %0, %1, -1\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp1), "=&a" (tmp2)
: "a" (&rw->lock)
: "memory");
}
#endif /* _XTENSA_SPINLOCK_H */ #endif /* _XTENSA_SPINLOCK_H */
...@@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int, ...@@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int,
/* Should probably move to linux/syscalls.h */ /* Should probably move to linux/syscalls.h */
struct pollfd; struct pollfd;
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec __user *tsp, void __user *sig); fd_set __user *exp, struct timespec __user *tsp,
void __user *sig);
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
struct timespec __user *tsp, const sigset_t __user *sigmask, struct timespec __user *tsp,
size_t sigsetsize); const sigset_t __user *sigmask,
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
size_t sigsetsize); asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
/*
* arch/xtensa/include/asm/traps.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Tensilica Inc.
*/
#ifndef _XTENSA_TRAPS_H
#define _XTENSA_TRAPS_H
#include <asm/ptrace.h>
/*
* handler must be either of the following:
* void (*)(struct pt_regs *regs);
* void (*)(struct pt_regs *regs, unsigned long exccause);
*/
extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
#endif /* _XTENSA_TRAPS_H */
...@@ -180,7 +180,8 @@ ...@@ -180,7 +180,8 @@
#define segment_eq(a,b) ((a).seg == (b).seg) #define segment_eq(a,b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) #define __user_ok(addr,size) \
(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
...@@ -234,10 +235,10 @@ do { \ ...@@ -234,10 +235,10 @@ do { \
int __cb; \ int __cb; \
retval = 0; \ retval = 0; \
switch (size) { \ switch (size) { \
case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
case 8: { \ case 8: { \
__typeof__(*ptr) __v64 = x; \ __typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr,&__v64,8); \ retval = __copy_to_user(ptr,&__v64,8); \
break; \ break; \
...@@ -291,7 +292,7 @@ do { \ ...@@ -291,7 +292,7 @@ do { \
* __check_align_* macros still work. * __check_align_* macros still work.
*/ */
#define __put_user_asm(x, addr, err, align, insn, cb) \ #define __put_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__( \ __asm__ __volatile__( \
__check_align_##align \ __check_align_##align \
"1: "insn" %2, %3, 0 \n" \ "1: "insn" %2, %3, 0 \n" \
"2: \n" \ "2: \n" \
...@@ -301,8 +302,8 @@ do { \ ...@@ -301,8 +302,8 @@ do { \
" .long 2b \n" \ " .long 2b \n" \
"5: \n" \ "5: \n" \
" l32r %1, 4b \n" \ " l32r %1, 4b \n" \
" movi %0, %4 \n" \ " movi %0, %4 \n" \
" jx %1 \n" \ " jx %1 \n" \
" .previous \n" \ " .previous \n" \
" .section __ex_table,\"a\" \n" \ " .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \ " .long 1b, 5b \n" \
...@@ -334,13 +335,13 @@ extern long __get_user_bad(void); ...@@ -334,13 +335,13 @@ extern long __get_user_bad(void);
do { \ do { \
int __cb; \ int __cb; \
retval = 0; \ retval = 0; \
switch (size) { \ switch (size) { \
case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
case 8: retval = __copy_from_user(&x,ptr,8); break; \ case 8: retval = __copy_from_user(&x,ptr,8); break; \
default: (x) = __get_user_bad(); \ default: (x) = __get_user_bad(); \
} \ } \
} while (0) } while (0)
...@@ -349,7 +350,7 @@ do { \ ...@@ -349,7 +350,7 @@ do { \
* __check_align_* macros still work. * __check_align_* macros still work.
*/ */
#define __get_user_asm(x, addr, err, align, insn, cb) \ #define __get_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__( \ __asm__ __volatile__( \
__check_align_##align \ __check_align_##align \
"1: "insn" %2, %3, 0 \n" \ "1: "insn" %2, %3, 0 \n" \
"2: \n" \ "2: \n" \
...@@ -360,8 +361,8 @@ do { \ ...@@ -360,8 +361,8 @@ do { \
"5: \n" \ "5: \n" \
" l32r %1, 4b \n" \ " l32r %1, 4b \n" \
" movi %2, 0 \n" \ " movi %2, 0 \n" \
" movi %0, %4 \n" \ " movi %0, %4 \n" \
" jx %1 \n" \ " jx %1 \n" \
" .previous \n" \ " .previous \n" \
" .section __ex_table,\"a\" \n" \ " .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \ " .long 1b, 5b \n" \
...@@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) ...@@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n)) #define __copy_to_user(to,from,n) \
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n)) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) \
__generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user #define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user #define __copy_from_user_inatomic __copy_from_user
......
...@@ -23,13 +23,13 @@ obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o ...@@ -23,13 +23,13 @@ obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
# #
# Replicate rules in scripts/Makefile.build # Replicate rules in scripts/Makefile.build
sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \ sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
-e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \ -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g' -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
quiet_cmd__cpp_lds_S = LDS $@ quiet_cmd__cpp_lds_S = LDS $@
cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \ cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
| sed $(sed-y) >$@ | sed $(sed-y) >$@
$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE $(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
$(call if_changed_dep,_cpp_lds_S) $(call if_changed_dep,_cpp_lds_S)
...@@ -442,7 +442,7 @@ ENTRY(fast_unaligned) ...@@ -442,7 +442,7 @@ ENTRY(fast_unaligned)
mov a1, a2 mov a1, a2
rsr a0, ps rsr a0, ps
bbsi.l a2, PS_UM_BIT, 1f # jump if user mode bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception movi a0, _kernel_exception
jx a0 jx a0
...@@ -450,6 +450,6 @@ ENTRY(fast_unaligned) ...@@ -450,6 +450,6 @@ ENTRY(fast_unaligned)
1: movi a0, _user_exception 1: movi a0, _user_exception
jx a0 jx a0
ENDPROC(fast_unaligned)
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */ #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
...@@ -41,6 +41,7 @@ int main(void) ...@@ -41,6 +41,7 @@ int main(void)
DEFINE(PT_SAR, offsetof (struct pt_regs, sar)); DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel)); DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall)); DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1])); DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
...@@ -91,7 +92,8 @@ int main(void) ...@@ -91,7 +92,8 @@ int main(void)
#endif #endif
DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds)); DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
thread.current_ds));
/* struct mm_struct */ /* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users)); DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
...@@ -108,4 +110,3 @@ int main(void) ...@@ -108,4 +110,3 @@ int main(void)
return 0; return 0;
} }
...@@ -43,10 +43,13 @@ ...@@ -43,10 +43,13 @@
/* IO protection is currently unsupported. */ /* IO protection is currently unsupported. */
ENTRY(fast_io_protect) ENTRY(fast_io_protect)
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 callx0 a0
ENDPROC(fast_io_protect)
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
/* /*
...@@ -139,6 +142,7 @@ ENTRY(fast_io_protect) ...@@ -139,6 +142,7 @@ ENTRY(fast_io_protect)
*/ */
ENTRY(coprocessor_save) ENTRY(coprocessor_save)
entry a1, 32 entry a1, 32
s32i a0, a1, 0 s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table movi a0, .Lsave_cp_regs_jump_table
...@@ -150,7 +154,10 @@ ENTRY(coprocessor_save) ...@@ -150,7 +154,10 @@ ENTRY(coprocessor_save)
1: l32i a0, a1, 0 1: l32i a0, a1, 0
retw retw
ENDPROC(coprocessor_save)
ENTRY(coprocessor_load) ENTRY(coprocessor_load)
entry a1, 32 entry a1, 32
s32i a0, a1, 0 s32i a0, a1, 0
movi a0, .Lload_cp_regs_jump_table movi a0, .Lload_cp_regs_jump_table
...@@ -162,8 +169,10 @@ ENTRY(coprocessor_load) ...@@ -162,8 +169,10 @@ ENTRY(coprocessor_load)
1: l32i a0, a1, 0 1: l32i a0, a1, 0
retw retw
ENDPROC(coprocessor_load)
/* /*
* coprocessor_flush(struct task_info*, index) * coprocessor_flush(struct task_info*, index)
* a2 a3 * a2 a3
* coprocessor_restore(struct task_info*, index) * coprocessor_restore(struct task_info*, index)
* a2 a3 * a2 a3
...@@ -178,6 +187,7 @@ ENTRY(coprocessor_load) ...@@ -178,6 +187,7 @@ ENTRY(coprocessor_load)
ENTRY(coprocessor_flush) ENTRY(coprocessor_flush)
entry a1, 32 entry a1, 32
s32i a0, a1, 0 s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table movi a0, .Lsave_cp_regs_jump_table
...@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush) ...@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush)
1: l32i a0, a1, 0 1: l32i a0, a1, 0
retw retw
ENDPROC(coprocessor_flush)
ENTRY(coprocessor_restore) ENTRY(coprocessor_restore)
entry a1, 32 entry a1, 32
s32i a0, a1, 0 s32i a0, a1, 0
...@@ -205,6 +217,8 @@ ENTRY(coprocessor_restore) ...@@ -205,6 +217,8 @@ ENTRY(coprocessor_restore)
1: l32i a0, a1, 0 1: l32i a0, a1, 0
retw retw
ENDPROC(coprocessor_restore)
/* /*
* Entry condition: * Entry condition:
* *
...@@ -220,10 +234,12 @@ ENTRY(coprocessor_restore) ...@@ -220,10 +234,12 @@ ENTRY(coprocessor_restore)
*/ */
ENTRY(fast_coprocessor_double) ENTRY(fast_coprocessor_double)
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 callx0 a0
ENDPROC(fast_coprocessor_double)
ENTRY(fast_coprocessor) ENTRY(fast_coprocessor)
...@@ -327,9 +343,14 @@ ENTRY(fast_coprocessor) ...@@ -327,9 +343,14 @@ ENTRY(fast_coprocessor)
rfe rfe
ENDPROC(fast_coprocessor)
.data .data
ENTRY(coprocessor_owner) ENTRY(coprocessor_owner)
.fill XCHAL_CP_MAX, 4, 0 .fill XCHAL_CP_MAX, 4, 0
#endif /* XTENSA_HAVE_COPROCESSORS */ END(coprocessor_owner)
#endif /* XTENSA_HAVE_COPROCESSORS */
...@@ -219,6 +219,7 @@ _user_exception: ...@@ -219,6 +219,7 @@ _user_exception:
j common_exception j common_exception
ENDPROC(user_exception)
/* /*
* First-level exit handler for kernel exceptions * First-level exit handler for kernel exceptions
...@@ -371,6 +372,13 @@ common_exception: ...@@ -371,6 +372,13 @@ common_exception:
s32i a2, a1, PT_LBEG s32i a2, a1, PT_LBEG
s32i a3, a1, PT_LEND s32i a3, a1, PT_LEND
/* Save SCOMPARE1 */
#if XCHAL_HAVE_S32C1I
rsr a2, scompare1
s32i a2, a1, PT_SCOMPARE1
#endif
/* Save optional registers. */ /* Save optional registers. */
save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
...@@ -432,6 +440,12 @@ common_exception_return: ...@@ -432,6 +440,12 @@ common_exception_return:
load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
/* Restore SCOMPARE1 */
#if XCHAL_HAVE_S32C1I
l32i a2, a1, PT_SCOMPARE1
wsr a2, scompare1
#endif
wsr a3, ps /* disable interrupts */ wsr a3, ps /* disable interrupts */
_bbci.l a3, PS_UM_BIT, kernel_exception_exit _bbci.l a3, PS_UM_BIT, kernel_exception_exit
...@@ -641,6 +655,8 @@ common_exception_exit: ...@@ -641,6 +655,8 @@ common_exception_exit:
l32i a1, a1, PT_AREG1 l32i a1, a1, PT_AREG1
rfde rfde
ENDPROC(kernel_exception)
/* /*
* Debug exception handler. * Debug exception handler.
* *
...@@ -701,6 +717,7 @@ ENTRY(debug_exception) ...@@ -701,6 +717,7 @@ ENTRY(debug_exception)
/* Debug exception while in exception mode. */ /* Debug exception while in exception mode. */
1: j 1b // FIXME!! 1: j 1b // FIXME!!
ENDPROC(debug_exception)
/* /*
* We get here in case of an unrecoverable exception. * We get here in case of an unrecoverable exception.
...@@ -751,6 +768,7 @@ ENTRY(unrecoverable_exception) ...@@ -751,6 +768,7 @@ ENTRY(unrecoverable_exception)
1: j 1b 1: j 1b
ENDPROC(unrecoverable_exception)
/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
...@@ -856,7 +874,7 @@ ENTRY(fast_alloca) ...@@ -856,7 +874,7 @@ ENTRY(fast_alloca)
_bnei a0, 1, 1f # no 'movsp a1, ax': jump _bnei a0, 1, 1f # no 'movsp a1, ax': jump
/* Move the save area. This implies the use of the L32E /* Move the save area. This implies the use of the L32E
* and S32E instructions, because this move must be done with * and S32E instructions, because this move must be done with
* the user's PS.RING privilege levels, not with ring 0 * the user's PS.RING privilege levels, not with ring 0
* (kernel's) privileges currently active with PS.EXCM * (kernel's) privileges currently active with PS.EXCM
...@@ -929,6 +947,7 @@ ENTRY(fast_alloca) ...@@ -929,6 +947,7 @@ ENTRY(fast_alloca)
l32i a2, a2, PT_AREG2 l32i a2, a2, PT_AREG2
rfe rfe
ENDPROC(fast_alloca)
/* /*
* fast system calls. * fast system calls.
...@@ -966,6 +985,8 @@ ENTRY(fast_syscall_kernel) ...@@ -966,6 +985,8 @@ ENTRY(fast_syscall_kernel)
j kernel_exception j kernel_exception
ENDPROC(fast_syscall_kernel)
ENTRY(fast_syscall_user) ENTRY(fast_syscall_user)
/* Skip syscall. */ /* Skip syscall. */
...@@ -983,19 +1004,21 @@ ENTRY(fast_syscall_user) ...@@ -983,19 +1004,21 @@ ENTRY(fast_syscall_user)
j user_exception j user_exception
ENTRY(fast_syscall_unrecoverable) ENDPROC(fast_syscall_user)
/* Restore all states. */ ENTRY(fast_syscall_unrecoverable)
l32i a0, a2, PT_AREG0 # restore a0 /* Restore all states. */
xsr a2, depc # restore a2, depc
rsr a3, excsave1
wsr a0, excsave1 l32i a0, a2, PT_AREG0 # restore a0
movi a0, unrecoverable_exception xsr a2, depc # restore a2, depc
callx0 a0 rsr a3, excsave1
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
ENDPROC(fast_syscall_unrecoverable)
/* /*
* sysxtensa syscall handler * sysxtensa syscall handler
...@@ -1101,7 +1124,7 @@ CATCH ...@@ -1101,7 +1124,7 @@ CATCH
movi a2, -EINVAL movi a2, -EINVAL
rfe rfe
ENDPROC(fast_syscall_xtensa)
/* fast_syscall_spill_registers. /* fast_syscall_spill_registers.
...@@ -1160,6 +1183,8 @@ ENTRY(fast_syscall_spill_registers) ...@@ -1160,6 +1183,8 @@ ENTRY(fast_syscall_spill_registers)
movi a2, 0 movi a2, 0
rfe rfe
ENDPROC(fast_syscall_spill_registers)
/* Fixup handler. /* Fixup handler.
* *
* We get here if the spill routine causes an exception, e.g. tlb miss. * We get here if the spill routine causes an exception, e.g. tlb miss.
...@@ -1228,9 +1253,9 @@ fast_syscall_spill_registers_fixup: ...@@ -1228,9 +1253,9 @@ fast_syscall_spill_registers_fixup:
movi a3, exc_table movi a3, exc_table
rsr a0, exccause rsr a0, exccause
addx4 a0, a0, a3 # find entry in table addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a0, a0, EXC_TABLE_FAST_USER # load handler
jx a0 jx a0
fast_syscall_spill_registers_fixup_return: fast_syscall_spill_registers_fixup_return:
...@@ -1432,7 +1457,7 @@ ENTRY(_spill_registers) ...@@ -1432,7 +1457,7 @@ ENTRY(_spill_registers)
rsr a0, ps rsr a0, ps
_bbci.l a0, PS_UM_BIT, 1f _bbci.l a0, PS_UM_BIT, 1f
/* User space: Setup a dummy frame and kill application. /* User space: Setup a dummy frame and kill application.
* Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
*/ */
...@@ -1464,6 +1489,8 @@ ENTRY(_spill_registers) ...@@ -1464,6 +1489,8 @@ ENTRY(_spill_registers)
callx0 a0 # should not return callx0 a0 # should not return
1: j 1b 1: j 1b
ENDPROC(_spill_registers)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
/* /*
* We should never get here. Bail out! * We should never get here. Bail out!
...@@ -1475,6 +1502,8 @@ ENTRY(fast_second_level_miss_double_kernel) ...@@ -1475,6 +1502,8 @@ ENTRY(fast_second_level_miss_double_kernel)
callx0 a0 # should not return callx0 a0 # should not return
1: j 1b 1: j 1b
ENDPROC(fast_second_level_miss_double_kernel)
/* First-level entry handler for user, kernel, and double 2nd-level /* First-level entry handler for user, kernel, and double 2nd-level
* TLB miss exceptions. Note that for now, user and kernel miss * TLB miss exceptions. Note that for now, user and kernel miss
* exceptions share the same entry point and are handled identically. * exceptions share the same entry point and are handled identically.
...@@ -1682,6 +1711,7 @@ ENTRY(fast_second_level_miss) ...@@ -1682,6 +1711,7 @@ ENTRY(fast_second_level_miss)
j _kernel_exception j _kernel_exception
1: j _user_exception 1: j _user_exception
ENDPROC(fast_second_level_miss)
/* /*
* StoreProhibitedException * StoreProhibitedException
...@@ -1777,6 +1807,9 @@ ENTRY(fast_store_prohibited) ...@@ -1777,6 +1807,9 @@ ENTRY(fast_store_prohibited)
bbsi.l a2, PS_UM_BIT, 1f bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception j _kernel_exception
1: j _user_exception 1: j _user_exception
ENDPROC(fast_store_prohibited)
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
/* /*
...@@ -1787,6 +1820,7 @@ ENTRY(fast_store_prohibited) ...@@ -1787,6 +1820,7 @@ ENTRY(fast_store_prohibited)
*/ */
ENTRY(system_call) ENTRY(system_call)
entry a1, 32 entry a1, 32
/* regs->syscall = regs->areg[2] */ /* regs->syscall = regs->areg[2] */
...@@ -1831,6 +1865,8 @@ ENTRY(system_call) ...@@ -1831,6 +1865,8 @@ ENTRY(system_call)
callx4 a4 callx4 a4
retw retw
ENDPROC(system_call)
/* /*
* Task switch. * Task switch.
...@@ -1899,6 +1935,7 @@ ENTRY(_switch_to) ...@@ -1899,6 +1935,7 @@ ENTRY(_switch_to)
retw retw
ENDPROC(_switch_to)
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
...@@ -1914,6 +1951,8 @@ ENTRY(ret_from_fork) ...@@ -1914,6 +1951,8 @@ ENTRY(ret_from_fork)
j common_exception_return j common_exception_return
ENDPROC(ret_from_fork)
/* /*
* Kernel thread creation helper * Kernel thread creation helper
* On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cacheasm.h> #include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/linkage.h> #include <linux/linkage.h>
...@@ -47,16 +48,19 @@ ...@@ -47,16 +48,19 @@
*/ */
__HEAD __HEAD
.globl _start ENTRY(_start)
_start: _j 2f
_j 2f
.align 4 .align 4
1: .word _startup 1: .word _startup
2: l32r a0, 1b 2: l32r a0, 1b
jx a0 jx a0
ENDPROC(_start)
.section .init.text, "ax" .section .init.text, "ax"
.align 4
_startup: ENTRY(_startup)
/* Disable interrupts and exceptions. */ /* Disable interrupts and exceptions. */
...@@ -107,7 +111,7 @@ _startup: ...@@ -107,7 +111,7 @@ _startup:
/* Disable all timers. */ /* Disable all timers. */
.set _index, 0 .set _index, 0
.rept XCHAL_NUM_TIMERS - 1 .rept XCHAL_NUM_TIMERS
wsr a0, SREG_CCOMPARE + _index wsr a0, SREG_CCOMPARE + _index
.set _index, _index + 1 .set _index, _index + 1
.endr .endr
...@@ -120,7 +124,7 @@ _startup: ...@@ -120,7 +124,7 @@ _startup:
/* Disable coprocessors. */ /* Disable coprocessors. */
#if XCHAL_CP_NUM > 0 #if XCHAL_HAVE_CP
wsr a0, cpenable wsr a0, cpenable
#endif #endif
...@@ -152,6 +156,8 @@ _startup: ...@@ -152,6 +156,8 @@ _startup:
isync isync
initialize_mmu
/* Unpack data sections /* Unpack data sections
* *
* The linker script used to build the Linux kernel image * The linker script used to build the Linux kernel image
...@@ -230,6 +236,7 @@ _startup: ...@@ -230,6 +236,7 @@ _startup:
should_never_return: should_never_return:
j should_never_return j should_never_return
ENDPROC(_startup)
/* /*
* BSS section * BSS section
...@@ -239,6 +246,8 @@ __PAGE_ALIGNED_BSS ...@@ -239,6 +246,8 @@ __PAGE_ALIGNED_BSS
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
ENTRY(swapper_pg_dir) ENTRY(swapper_pg_dir)
.fill PAGE_SIZE, 1, 0 .fill PAGE_SIZE, 1, 0
END(swapper_pg_dir)
#endif #endif
ENTRY(empty_zero_page) ENTRY(empty_zero_page)
.fill PAGE_SIZE, 1, 0 .fill PAGE_SIZE, 1, 0
END(empty_zero_page)
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/platform.h> #include <asm/platform.h>
...@@ -26,19 +28,22 @@ static unsigned int cached_irq_mask; ...@@ -26,19 +28,22 @@ static unsigned int cached_irq_mask;
atomic_t irq_err_count; atomic_t irq_err_count;
static struct irq_domain *root_domain;
/* /*
* do_IRQ handles all normal device IRQ's (the special * do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific * SMP cross-CPU interrupts have their own specific
* handlers). * handlers).
*/ */
asmlinkage void do_IRQ(int irq, struct pt_regs *regs) asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{ {
struct pt_regs *old_regs = set_irq_regs(regs); struct pt_regs *old_regs = set_irq_regs(regs);
int irq = irq_find_mapping(root_domain, hwirq);
if (irq >= NR_IRQS) { if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n", printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, irq); __func__, hwirq);
} }
irq_enter(); irq_enter();
...@@ -71,40 +76,39 @@ int arch_show_interrupts(struct seq_file *p, int prec) ...@@ -71,40 +76,39 @@ int arch_show_interrupts(struct seq_file *p, int prec)
static void xtensa_irq_mask(struct irq_data *d) static void xtensa_irq_mask(struct irq_data *d)
{ {
cached_irq_mask &= ~(1 << d->irq); cached_irq_mask &= ~(1 << d->hwirq);
set_sr (cached_irq_mask, intenable); set_sr (cached_irq_mask, intenable);
} }
static void xtensa_irq_unmask(struct irq_data *d) static void xtensa_irq_unmask(struct irq_data *d)
{ {
cached_irq_mask |= 1 << d->irq; cached_irq_mask |= 1 << d->hwirq;
set_sr (cached_irq_mask, intenable); set_sr (cached_irq_mask, intenable);
} }
static void xtensa_irq_enable(struct irq_data *d) static void xtensa_irq_enable(struct irq_data *d)
{ {
variant_irq_enable(d->irq); variant_irq_enable(d->hwirq);
xtensa_irq_unmask(d); xtensa_irq_unmask(d);
} }
static void xtensa_irq_disable(struct irq_data *d) static void xtensa_irq_disable(struct irq_data *d)
{ {
xtensa_irq_mask(d); xtensa_irq_mask(d);
variant_irq_disable(d->irq); variant_irq_disable(d->hwirq);
} }
static void xtensa_irq_ack(struct irq_data *d) static void xtensa_irq_ack(struct irq_data *d)
{ {
set_sr(1 << d->irq, intclear); set_sr(1 << d->hwirq, intclear);
} }
static int xtensa_irq_retrigger(struct irq_data *d) static int xtensa_irq_retrigger(struct irq_data *d)
{ {
set_sr (1 << d->irq, INTSET); set_sr(1 << d->hwirq, intset);
return 1; return 1;
} }
static struct irq_chip xtensa_irq_chip = { static struct irq_chip xtensa_irq_chip = {
.name = "xtensa", .name = "xtensa",
.irq_enable = xtensa_irq_enable, .irq_enable = xtensa_irq_enable,
...@@ -115,37 +119,99 @@ static struct irq_chip xtensa_irq_chip = { ...@@ -115,37 +119,99 @@ static struct irq_chip xtensa_irq_chip = {
.irq_retrigger = xtensa_irq_retrigger, .irq_retrigger = xtensa_irq_retrigger,
}; };
void __init init_IRQ(void) static int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{ {
int index; u32 mask = 1 << hw;
for (index = 0; index < XTENSA_NR_IRQS; index++) { if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
int mask = 1 << index; irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
handle_simple_irq, "level");
if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) irq_set_status_flags(irq, IRQ_LEVEL);
irq_set_chip_and_handler(index, &xtensa_irq_chip, } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
handle_simple_irq); irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
/* XCHAL_INTTYPE_MASK_NMI */
irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
return 0;
}
else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) static unsigned map_ext_irq(unsigned ext_irq)
irq_set_chip_and_handler(index, &xtensa_irq_chip, {
handle_edge_irq); unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
unsigned i;
else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) for (i = 0; mask; ++i, mask >>= 1) {
irq_set_chip_and_handler(index, &xtensa_irq_chip, if ((mask & 1) && ext_irq-- == 0)
handle_level_irq); return i;
}
return XCHAL_NUM_INTERRUPTS;
}
else if (mask & XCHAL_INTTYPE_MASK_TIMER) /*
irq_set_chip_and_handler(index, &xtensa_irq_chip, * Device Tree IRQ specifier translation function which works with one or
handle_edge_irq); * two cell bindings. First cell value maps directly to the hwirq number.
* Second cell if present specifies whether hwirq number is external (1) or
* internal (0).
*/
int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
if (WARN_ON(intsize < 1 || intsize > 2))
return -EINVAL;
if (intsize == 2 && intspec[1] == 1) {
unsigned int_irq = map_ext_irq(intspec[0]);
if (int_irq < XCHAL_NUM_INTERRUPTS)
*out_hwirq = int_irq;
else
return -EINVAL;
} else {
*out_hwirq = intspec[0];
}
*out_type = IRQ_TYPE_NONE;
return 0;
}
else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */ static const struct irq_domain_ops xtensa_irq_domain_ops = {
/* XCHAL_INTTYPE_MASK_NMI */ .xlate = xtensa_irq_domain_xlate,
.map = xtensa_irq_map,
};
irq_set_chip_and_handler(index, &xtensa_irq_chip, void __init init_IRQ(void)
handle_level_irq); {
} struct device_node *intc = NULL;
cached_irq_mask = 0; cached_irq_mask = 0;
set_sr(~0, intclear);
#ifdef CONFIG_OF
/* The interrupt controller device node is mandatory */
intc = of_find_compatible_node(NULL, NULL, "xtensa,pic");
BUG_ON(!intc);
root_domain = irq_domain_add_linear(intc, NR_IRQS,
&xtensa_irq_domain_ops, NULL);
#else
root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
&xtensa_irq_domain_ops, NULL);
#endif
irq_set_default_host(root_domain);
variant_init_irq(); variant_init_irq();
} }
...@@ -53,7 +53,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -53,7 +53,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
struct module *mod) struct module *mod)
{ {
unsigned int i; unsigned int i;
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym; Elf32_Sym *sym;
unsigned char *location; unsigned char *location;
uint32_t value; uint32_t value;
......
...@@ -44,4 +44,3 @@ _F(void, calibrate_ccount, (void), ...@@ -44,4 +44,3 @@ _F(void, calibrate_ccount, (void),
ccount_per_jiffy = 10 * (1000000UL/HZ); ccount_per_jiffy = 10 * (1000000UL/HZ);
}); });
#endif #endif
...@@ -108,7 +108,7 @@ void coprocessor_flush_all(struct thread_info *ti) ...@@ -108,7 +108,7 @@ void coprocessor_flush_all(struct thread_info *ti)
void cpu_idle(void) void cpu_idle(void)
{ {
local_irq_enable(); local_irq_enable();
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
......
...@@ -154,7 +154,7 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs) ...@@ -154,7 +154,7 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs)
coprocessor_flush_all(ti); coprocessor_flush_all(ti);
coprocessor_release_all(ti); coprocessor_release_all(ti);
ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
sizeof(xtregs_coprocessor_t)); sizeof(xtregs_coprocessor_t));
#endif #endif
ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt, ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
...@@ -343,4 +343,3 @@ void do_syscall_trace_leave(struct pt_regs *regs) ...@@ -343,4 +343,3 @@ void do_syscall_trace_leave(struct pt_regs *regs)
&& (current->ptrace & PT_PTRACED)) && (current->ptrace & PT_PTRACED))
do_syscall_trace(); do_syscall_trace();
} }
This diff is collapsed.
...@@ -212,7 +212,7 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame) ...@@ -212,7 +212,7 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
if (err) if (err)
return err; return err;
/* The signal handler may have used coprocessors in which /* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a * case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy * reloading of the original task's CP state by the lazy
* context-switching mechanisms of CP exception handling. * context-switching mechanisms of CP exception handling.
...@@ -396,7 +396,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -396,7 +396,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
*/ */
/* Set up registers for signal handler */ /* Set up registers for signal handler */
start_thread(regs, (unsigned long) ka->sa.sa_handler, start_thread(regs, (unsigned long) ka->sa.sa_handler,
(unsigned long) frame); (unsigned long) frame);
/* Set up a stack frame for a call4 /* Set up a stack frame for a call4
...@@ -424,9 +424,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -424,9 +424,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return -EFAULT; return -EFAULT;
} }
asmlinkage long xtensa_sigaltstack(const stack_t __user *uss, asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
stack_t __user *uoss, stack_t __user *uoss,
long a2, long a3, long a4, long a5, long a2, long a3, long a4, long a5,
struct pt_regs *regs) struct pt_regs *regs)
{ {
return do_sigaltstack(uss, uoss, regs->areg[1]); return do_sigaltstack(uss, uoss, regs->areg[1]);
......
...@@ -52,4 +52,3 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice, ...@@ -52,4 +52,3 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
{ {
return sys_fadvise64_64(fd, offset, len, advice); return sys_fadvise64_64(fd, offset, len, advice);
} }
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/profile.h> #include <linux/profile.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/irqdomain.h>
#include <asm/timex.h> #include <asm/timex.h>
#include <asm/platform.h> #include <asm/platform.h>
...@@ -31,7 +32,7 @@ unsigned long ccount_per_jiffy; /* per 1/HZ */ ...@@ -31,7 +32,7 @@ unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long nsec_per_ccount; /* nsec per ccount increment */ unsigned long nsec_per_ccount; /* nsec per ccount increment */
#endif #endif
static cycle_t ccount_read(void) static cycle_t ccount_read(struct clocksource *cs)
{ {
return (cycle_t)get_ccount(); return (cycle_t)get_ccount();
} }
...@@ -52,6 +53,7 @@ static struct irqaction timer_irqaction = { ...@@ -52,6 +53,7 @@ static struct irqaction timer_irqaction = {
void __init time_init(void) void __init time_init(void)
{ {
unsigned int irq;
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency "); printk("Calibrating CPU frequency ");
platform_calibrate_ccount(); platform_calibrate_ccount();
...@@ -62,7 +64,8 @@ void __init time_init(void) ...@@ -62,7 +64,8 @@ void __init time_init(void)
/* Initialize the linux timer interrupt. */ /* Initialize the linux timer interrupt. */
setup_irq(LINUX_TIMER_INT, &timer_irqaction); irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
setup_irq(irq, &timer_irqaction);
set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY); set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
} }
......
...@@ -293,6 +293,17 @@ do_debug(struct pt_regs *regs) ...@@ -293,6 +293,17 @@ do_debug(struct pt_regs *regs)
} }
/* Set exception C handler - for temporary use when probing exceptions */
void * __init trap_set_handler(int cause, void *handler)
{
unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause];
void *previous = (void *)*entry;
*entry = (unsigned long)handler;
return previous;
}
/* /*
* Initialize dispatch tables. * Initialize dispatch tables.
* *
...@@ -397,7 +408,8 @@ static inline void spill_registers(void) ...@@ -397,7 +408,8 @@ static inline void spill_registers(void)
"wsr a13, sar\n\t" "wsr a13, sar\n\t"
"wsr a14, ps\n\t" "wsr a14, ps\n\t"
:: "a" (&a0), "a" (&ps) :: "a" (&a0), "a" (&ps)
: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory"); : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
"memory");
} }
void show_trace(struct task_struct *task, unsigned long *sp) void show_trace(struct task_struct *task, unsigned long *sp)
...@@ -452,7 +464,7 @@ void show_stack(struct task_struct *task, unsigned long *sp) ...@@ -452,7 +464,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (!sp) if (!sp)
sp = stack_pointer(task); sp = stack_pointer(task);
stack = sp; stack = sp;
printk("\nStack: "); printk("\nStack: ");
...@@ -523,5 +535,3 @@ void die(const char * str, struct pt_regs * regs, long err) ...@@ -523,5 +535,3 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(err); do_exit(err);
} }
...@@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector) ...@@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector)
l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a0, a0, EXC_TABLE_FAST_USER # load handler
jx a0 jx a0
ENDPROC(_UserExceptionVector)
/* /*
* Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0) * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
* *
...@@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector) ...@@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector)
l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
jx a0 jx a0
ENDPROC(_KernelExceptionVector)
/* /*
* Double exception vector (Exceptions with PS.EXCM == 1) * Double exception vector (Exceptions with PS.EXCM == 1)
...@@ -225,7 +228,13 @@ ENTRY(_DoubleExceptionVector) ...@@ -225,7 +228,13 @@ ENTRY(_DoubleExceptionVector)
/* Window overflow/underflow exception. Get stack pointer. */ /* Window overflow/underflow exception. Get stack pointer. */
mov a3, a2 mov a3, a2
movi a2, exc_table /* This explicit literal and the following references to it are made
* in order to fit DoubleExceptionVector.literals into the available
* 16-byte gap before DoubleExceptionVector.text in the absence of
* link time relaxation. See kernel/vmlinux.lds.S
*/
.literal .Lexc_table, exc_table
l32r a2, .Lexc_table
l32i a2, a2, EXC_TABLE_KSTK l32i a2, a2, EXC_TABLE_KSTK
/* Check for overflow/underflow exception, jump if overflow. */ /* Check for overflow/underflow exception, jump if overflow. */
...@@ -255,7 +264,7 @@ ENTRY(_DoubleExceptionVector) ...@@ -255,7 +264,7 @@ ENTRY(_DoubleExceptionVector)
s32i a0, a2, PT_AREG0 s32i a0, a2, PT_AREG0
wsr a3, excsave1 # save a3 wsr a3, excsave1 # save a3
movi a3, exc_table l32r a3, .Lexc_table
rsr a0, exccause rsr a0, exccause
s32i a0, a2, PT_DEPC # mark it as a regular exception s32i a0, a2, PT_DEPC # mark it as a regular exception
...@@ -267,7 +276,7 @@ ENTRY(_DoubleExceptionVector) ...@@ -267,7 +276,7 @@ ENTRY(_DoubleExceptionVector)
/* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */ /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
movi a3, exc_table l32r a3, .Lexc_table
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable
/* Enter critical section. */ /* Enter critical section. */
...@@ -296,7 +305,7 @@ ENTRY(_DoubleExceptionVector) ...@@ -296,7 +305,7 @@ ENTRY(_DoubleExceptionVector)
/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
movi a3, exc_table l32r a3, .Lexc_table
rsr a0, exccause rsr a0, exccause
addx4 a0, a0, a3 addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER l32i a0, a0, EXC_TABLE_FAST_USER
...@@ -338,6 +347,7 @@ ENTRY(_DoubleExceptionVector) ...@@ -338,6 +347,7 @@ ENTRY(_DoubleExceptionVector)
.end literal_prefix .end literal_prefix
ENDPROC(_DoubleExceptionVector)
/* /*
* Debug interrupt vector * Debug interrupt vector
...@@ -349,9 +359,11 @@ ENTRY(_DoubleExceptionVector) ...@@ -349,9 +359,11 @@ ENTRY(_DoubleExceptionVector)
.section .DebugInterruptVector.text, "ax" .section .DebugInterruptVector.text, "ax"
ENTRY(_DebugInterruptVector) ENTRY(_DebugInterruptVector)
xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
jx a0 jx a0
ENDPROC(_DebugInterruptVector)
/* Window overflow and underflow handlers. /* Window overflow and underflow handlers.
...@@ -363,38 +375,43 @@ ENTRY(_DebugInterruptVector) ...@@ -363,38 +375,43 @@ ENTRY(_DebugInterruptVector)
* we try to access any page that would cause a page fault early. * we try to access any page that would cause a page fault early.
*/ */
#define ENTRY_ALIGN64(name) \
.globl name; \
.align 64; \
name:
.section .WindowVectors.text, "ax" .section .WindowVectors.text, "ax"
/* 4-Register Window Overflow Vector (Handler) */ /* 4-Register Window Overflow Vector (Handler) */
.align 64 ENTRY_ALIGN64(_WindowOverflow4)
.global _WindowOverflow4
_WindowOverflow4:
s32e a0, a5, -16 s32e a0, a5, -16
s32e a1, a5, -12 s32e a1, a5, -12
s32e a2, a5, -8 s32e a2, a5, -8
s32e a3, a5, -4 s32e a3, a5, -4
rfwo rfwo
ENDPROC(_WindowOverflow4)
/* 4-Register Window Underflow Vector (Handler) */ /* 4-Register Window Underflow Vector (Handler) */
.align 64 ENTRY_ALIGN64(_WindowUnderflow4)
.global _WindowUnderflow4
_WindowUnderflow4:
l32e a0, a5, -16 l32e a0, a5, -16
l32e a1, a5, -12 l32e a1, a5, -12
l32e a2, a5, -8 l32e a2, a5, -8
l32e a3, a5, -4 l32e a3, a5, -4
rfwu rfwu
ENDPROC(_WindowUnderflow4)
/* 8-Register Window Overflow Vector (Handler) */ /* 8-Register Window Overflow Vector (Handler) */
.align 64 ENTRY_ALIGN64(_WindowOverflow8)
.global _WindowOverflow8
_WindowOverflow8:
s32e a0, a9, -16 s32e a0, a9, -16
l32e a0, a1, -12 l32e a0, a1, -12
s32e a2, a9, -8 s32e a2, a9, -8
...@@ -406,11 +423,12 @@ _WindowOverflow8: ...@@ -406,11 +423,12 @@ _WindowOverflow8:
s32e a7, a0, -20 s32e a7, a0, -20
rfwo rfwo
ENDPROC(_WindowOverflow8)
/* 8-Register Window Underflow Vector (Handler) */ /* 8-Register Window Underflow Vector (Handler) */
.align 64 ENTRY_ALIGN64(_WindowUnderflow8)
.global _WindowUnderflow8
_WindowUnderflow8:
l32e a1, a9, -12 l32e a1, a9, -12
l32e a0, a9, -16 l32e a0, a9, -16
l32e a7, a1, -12 l32e a7, a1, -12
...@@ -422,12 +440,12 @@ _WindowUnderflow8: ...@@ -422,12 +440,12 @@ _WindowUnderflow8:
l32e a7, a7, -20 l32e a7, a7, -20
rfwu rfwu
ENDPROC(_WindowUnderflow8)
/* 12-Register Window Overflow Vector (Handler) */ /* 12-Register Window Overflow Vector (Handler) */
.align 64 ENTRY_ALIGN64(_WindowOverflow12)
.global _WindowOverflow12
_WindowOverflow12:
s32e a0, a13, -16 s32e a0, a13, -16
l32e a0, a1, -12 l32e a0, a1, -12
s32e a1, a13, -12 s32e a1, a13, -12
...@@ -443,11 +461,12 @@ _WindowOverflow12: ...@@ -443,11 +461,12 @@ _WindowOverflow12:
s32e a11, a0, -20 s32e a11, a0, -20
rfwo rfwo
ENDPROC(_WindowOverflow12)
/* 12-Register Window Underflow Vector (Handler) */ /* 12-Register Window Underflow Vector (Handler) */
.align 64 ENTRY_ALIGN64(_WindowUnderflow12)
.global _WindowUnderflow12
_WindowUnderflow12:
l32e a1, a13, -12 l32e a1, a13, -12
l32e a0, a13, -16 l32e a0, a13, -16
l32e a11, a1, -12 l32e a11, a1, -12
...@@ -463,6 +482,6 @@ _WindowUnderflow12: ...@@ -463,6 +482,6 @@ _WindowUnderflow12:
l32e a11, a11, -20 l32e a11, a11, -20
rfwu rfwu
.text ENDPROC(_WindowUnderflow12)
.text
...@@ -41,10 +41,11 @@ ...@@ -41,10 +41,11 @@
.text .text
ENTRY(csum_partial) ENTRY(csum_partial)
/*
* Experiments with Ethernet and SLIP connections show that buf /*
* is aligned on either a 2-byte or 4-byte boundary. * Experiments with Ethernet and SLIP connections show that buf
*/ * is aligned on either a 2-byte or 4-byte boundary.
*/
entry sp, 32 entry sp, 32
extui a5, a2, 0, 2 extui a5, a2, 0, 2
bnez a5, 8f /* branch if 2-byte aligned */ bnez a5, 8f /* branch if 2-byte aligned */
...@@ -170,7 +171,7 @@ ENTRY(csum_partial) ...@@ -170,7 +171,7 @@ ENTRY(csum_partial)
3: 3:
j 5b /* branch to handle the remaining byte */ j 5b /* branch to handle the remaining byte */
ENDPROC(csum_partial)
/* /*
* Copy from ds while checksumming, otherwise like csum_partial * Copy from ds while checksumming, otherwise like csum_partial
...@@ -211,6 +212,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, ...@@ -211,6 +212,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
*/ */
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
entry sp, 32 entry sp, 32
mov a12, a3 mov a12, a3
mov a11, a4 mov a11, a4
...@@ -367,6 +369,8 @@ DST( s8i a8, a3, 1 ) ...@@ -367,6 +369,8 @@ DST( s8i a8, a3, 1 )
6: 6:
j 4b /* process the possible trailing odd byte */ j 4b /* process the possible trailing odd byte */
ENDPROC(csum_partial_copy_generic)
# Exception handler: # Exception handler:
.section .fixup, "ax" .section .fixup, "ax"
...@@ -406,4 +410,3 @@ DST( s8i a8, a3, 1 ) ...@@ -406,4 +410,3 @@ DST( s8i a8, a3, 1 )
retw retw
.previous .previous
...@@ -210,8 +210,10 @@ memcpy: ...@@ -210,8 +210,10 @@ memcpy:
_beqz a4, .Ldone # avoid loading anything for zero-length copies _beqz a4, .Ldone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src # copy 16 bytes per iteration for word-aligned dst and unaligned src
ssa8 a3 # set shift amount from byte offset ssa8 a3 # set shift amount from byte offset
#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the
lint or ferret client, or 0 to save a few cycles */ /* set to 1 when running on ISS (simulator) with the
lint or ferret client, or 0 to save a few cycles */
#define SIM_CHECKS_ALIGNMENT 1
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
and a11, a3, a8 # save unalignment offset for below and a11, a3, a8 # save unalignment offset for below
sub a3, a3, a11 # align a3 sub a3, a3, a11 # align a3
......
...@@ -241,8 +241,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) ...@@ -241,8 +241,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
unsigned char header_type; unsigned char header_type;
struct pci_dev *dev = &pciauto_dev; struct pci_dev *dev = &pciauto_dev;
pciauto_dev.bus = &pciauto_bus; pciauto_dev.bus = &pciauto_bus;
pciauto_dev.sysdata = pci_ctrl; pciauto_dev.sysdata = pci_ctrl;
pciauto_bus.ops = pci_ctrl->ops; pciauto_bus.ops = pci_ctrl->ops;
/* /*
...@@ -345,8 +345,3 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) ...@@ -345,8 +345,3 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
} }
return sub_bus; return sub_bus;
} }
...@@ -166,7 +166,7 @@ __strncpy_user: ...@@ -166,7 +166,7 @@ __strncpy_user:
retw retw
.Lz1: # byte 1 is zero .Lz1: # byte 1 is zero
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
extui a9, a9, 16, 16 extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */ #endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s) EX(s16i, a9, a11, 0, fixup_s)
addi a11, a11, 1 # advance dst pointer addi a11, a11, 1 # advance dst pointer
...@@ -174,7 +174,7 @@ __strncpy_user: ...@@ -174,7 +174,7 @@ __strncpy_user:
retw retw
.Lz2: # byte 2 is zero .Lz2: # byte 2 is zero
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
extui a9, a9, 16, 16 extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */ #endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s) EX(s16i, a9, a11, 0, fixup_s)
movi a9, 0 movi a9, 0
......
...@@ -145,4 +145,3 @@ __strnlen_user: ...@@ -145,4 +145,3 @@ __strnlen_user:
lenfixup: lenfixup:
movi a2, 0 movi a2, 0
retw retw
...@@ -318,4 +318,3 @@ l_fixup: ...@@ -318,4 +318,3 @@ l_fixup:
/* Ignore memset return value in a6. */ /* Ignore memset return value in a6. */
/* a2 still contains bytes not copied. */ /* a2 still contains bytes not copied. */
retw retw
...@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page) ...@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page)
* For now, flush the whole cache. FIXME?? * For now, flush the whole cache. FIXME??
*/ */
void flush_cache_range(struct vm_area_struct* vma, void flush_cache_range(struct vm_area_struct* vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
__flush_invalidate_dcache_all(); __flush_invalidate_dcache_all();
...@@ -133,7 +133,7 @@ void flush_cache_range(struct vm_area_struct* vma, ...@@ -133,7 +133,7 @@ void flush_cache_range(struct vm_area_struct* vma,
*/ */
void flush_cache_page(struct vm_area_struct* vma, unsigned long address, void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
unsigned long pfn) unsigned long pfn)
{ {
/* Note that we have to use the 'alias' address to avoid multi-hit */ /* Note that we have to use the 'alias' address to avoid multi-hit */
...@@ -166,14 +166,14 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) ...@@ -166,14 +166,14 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
unsigned long paddr = (unsigned long) page_address(page); unsigned long paddr = (unsigned long) page_address(page);
unsigned long phys = page_to_phys(page); unsigned long phys = page_to_phys(page);
unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page(paddr); __flush_invalidate_dcache_page(paddr);
__flush_invalidate_dcache_page_alias(vaddr, phys); __flush_invalidate_dcache_page_alias(tmp, phys);
__invalidate_icache_page_alias(vaddr, phys); __invalidate_icache_page_alias(tmp, phys);
clear_bit(PG_arch_1, &page->flags); clear_bit(PG_arch_1, &page->flags);
} }
...@@ -195,7 +195,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) ...@@ -195,7 +195,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
void copy_to_user_page(struct vm_area_struct *vma, struct page *page, void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src, unsigned long vaddr, void *dst, const void *src,
unsigned long len) unsigned long len)
{ {
...@@ -205,8 +205,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ...@@ -205,8 +205,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
/* Flush and invalidate user page if aliased. */ /* Flush and invalidate user page if aliased. */
if (alias) { if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(temp, phys); __flush_invalidate_dcache_page_alias(t, phys);
} }
/* Copy data */ /* Copy data */
...@@ -219,12 +219,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ...@@ -219,12 +219,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
*/ */
if (alias) { if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_range((unsigned long) dst, len); __flush_invalidate_dcache_range((unsigned long) dst, len);
if ((vma->vm_flags & VM_EXEC) != 0) { if ((vma->vm_flags & VM_EXEC) != 0)
__invalidate_icache_page_alias(temp, phys); __invalidate_icache_page_alias(t, phys);
}
} else if ((vma->vm_flags & VM_EXEC) != 0) { } else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst,len); __flush_dcache_range((unsigned long)dst,len);
...@@ -245,8 +244,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, ...@@ -245,8 +244,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
*/ */
if (alias) { if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(temp, phys); __flush_invalidate_dcache_page_alias(t, phys);
} }
memcpy(dst, src, len); memcpy(dst, src, len);
......
...@@ -254,4 +254,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) ...@@ -254,4 +254,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
die("Oops", regs, sig); die("Oops", regs, sig);
do_exit(sig); do_exit(sig);
} }
...@@ -75,15 +75,15 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) ...@@ -75,15 +75,15 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
sysmem.nr_banks++; sysmem.nr_banks++;
} }
sysmem.bank[i].end = start; sysmem.bank[i].end = start;
} else if (end < sysmem.bank[i].end) {
sysmem.bank[i].start = end;
} else { } else {
if (end < sysmem.bank[i].end) /* remove entry */
sysmem.bank[i].start = end; sysmem.nr_banks--;
else { sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
/* remove entry */ sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
sysmem.nr_banks--;
sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
}
} }
return -1; return -1;
} }
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
*/ */
ENTRY(clear_page) ENTRY(clear_page)
entry a1, 16 entry a1, 16
movi a3, 0 movi a3, 0
...@@ -45,6 +46,8 @@ ENTRY(clear_page) ...@@ -45,6 +46,8 @@ ENTRY(clear_page)
retw retw
ENDPROC(clear_page)
/* /*
* copy_page and copy_user_page are the same for non-cache-aliased configs. * copy_page and copy_user_page are the same for non-cache-aliased configs.
* *
...@@ -53,6 +56,7 @@ ENTRY(clear_page) ...@@ -53,6 +56,7 @@ ENTRY(clear_page)
*/ */
ENTRY(copy_page) ENTRY(copy_page)
entry a1, 16 entry a1, 16
__loopi a2, a4, PAGE_SIZE, 32 __loopi a2, a4, PAGE_SIZE, 32
...@@ -84,6 +88,8 @@ ENTRY(copy_page) ...@@ -84,6 +88,8 @@ ENTRY(copy_page)
retw retw
ENDPROC(copy_page)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
/* /*
* If we have to deal with cache aliasing, we use temporary memory mappings * If we have to deal with cache aliasing, we use temporary memory mappings
...@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start) ...@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
*/ */
ENTRY(clear_user_page) ENTRY(clear_user_page)
entry a1, 32 entry a1, 32
/* Mark page dirty and determine alias. */ /* Mark page dirty and determine alias. */
...@@ -164,6 +171,8 @@ ENTRY(clear_user_page) ...@@ -164,6 +171,8 @@ ENTRY(clear_user_page)
retw retw
ENDPROC(clear_user_page)
/* /*
* copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
* a2 a3 a4 a5 * a2 a3 a4 a5
...@@ -171,7 +180,7 @@ ENTRY(clear_user_page) ...@@ -171,7 +180,7 @@ ENTRY(clear_user_page)
ENTRY(copy_user_page) ENTRY(copy_user_page)
entry a1, 32 entry a1, 32
/* Mark page dirty and determine alias for destination. */ /* Mark page dirty and determine alias for destination. */
...@@ -262,6 +271,8 @@ ENTRY(copy_user_page) ...@@ -262,6 +271,8 @@ ENTRY(copy_user_page)
retw retw
ENDPROC(copy_user_page)
#endif #endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE) #if (DCACHE_WAY_SIZE > PAGE_SIZE)
...@@ -272,6 +283,7 @@ ENTRY(copy_user_page) ...@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
*/ */
ENTRY(__flush_invalidate_dcache_page_alias) ENTRY(__flush_invalidate_dcache_page_alias)
entry sp, 16 entry sp, 16
movi a7, 0 # required for exception handler movi a7, 0 # required for exception handler
...@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias) ...@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
retw retw
ENDPROC(__flush_invalidate_dcache_page_alias)
#endif #endif
ENTRY(__tlbtemp_mapping_itlb) ENTRY(__tlbtemp_mapping_itlb)
...@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb) ...@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
#if (ICACHE_WAY_SIZE > PAGE_SIZE) #if (ICACHE_WAY_SIZE > PAGE_SIZE)
ENTRY(__invalidate_icache_page_alias) ENTRY(__invalidate_icache_page_alias)
entry sp, 16 entry sp, 16
addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE) addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
...@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias) ...@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
isync isync
retw retw
ENDPROC(__invalidate_icache_page_alias)
#endif #endif
/* End of special treatment in tlb miss exception */ /* End of special treatment in tlb miss exception */
ENTRY(__tlbtemp_mapping_end) ENTRY(__tlbtemp_mapping_end)
#endif /* CONFIG_MMU #endif /* CONFIG_MMU
/* /*
...@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end) ...@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)
*/ */
ENTRY(__invalidate_icache_page) ENTRY(__invalidate_icache_page)
entry sp, 16 entry sp, 16
___invalidate_icache_page a2 a3 ___invalidate_icache_page a2 a3
...@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page) ...@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)
retw retw
ENDPROC(__invalidate_icache_page)
/* /*
* void __invalidate_dcache_page(ulong start) * void __invalidate_dcache_page(ulong start)
*/ */
ENTRY(__invalidate_dcache_page) ENTRY(__invalidate_dcache_page)
entry sp, 16 entry sp, 16
___invalidate_dcache_page a2 a3 ___invalidate_dcache_page a2 a3
...@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page) ...@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)
retw retw
ENDPROC(__invalidate_dcache_page)
/* /*
* void __flush_invalidate_dcache_page(ulong start) * void __flush_invalidate_dcache_page(ulong start)
*/ */
ENTRY(__flush_invalidate_dcache_page) ENTRY(__flush_invalidate_dcache_page)
entry sp, 16 entry sp, 16
___flush_invalidate_dcache_page a2 a3 ___flush_invalidate_dcache_page a2 a3
...@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page) ...@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)
dsync dsync
retw retw
ENDPROC(__flush_invalidate_dcache_page)
/* /*
* void __flush_dcache_page(ulong start) * void __flush_dcache_page(ulong start)
*/ */
ENTRY(__flush_dcache_page) ENTRY(__flush_dcache_page)
entry sp, 16 entry sp, 16
___flush_dcache_page a2 a3 ___flush_dcache_page a2 a3
...@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page) ...@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)
dsync dsync
retw retw
ENDPROC(__flush_dcache_page)
/* /*
* void __invalidate_icache_range(ulong start, ulong size) * void __invalidate_icache_range(ulong start, ulong size)
*/ */
ENTRY(__invalidate_icache_range) ENTRY(__invalidate_icache_range)
entry sp, 16 entry sp, 16
___invalidate_icache_range a2 a3 a4 ___invalidate_icache_range a2 a3 a4
...@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range) ...@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)
retw retw
ENDPROC(__invalidate_icache_range)
/* /*
* void __flush_invalidate_dcache_range(ulong start, ulong size) * void __flush_invalidate_dcache_range(ulong start, ulong size)
*/ */
ENTRY(__flush_invalidate_dcache_range) ENTRY(__flush_invalidate_dcache_range)
entry sp, 16 entry sp, 16
___flush_invalidate_dcache_range a2 a3 a4 ___flush_invalidate_dcache_range a2 a3 a4
...@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range) ...@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)
retw retw
ENDPROC(__flush_invalidate_dcache_range)
/* /*
* void _flush_dcache_range(ulong start, ulong size) * void _flush_dcache_range(ulong start, ulong size)
*/ */
ENTRY(__flush_dcache_range) ENTRY(__flush_dcache_range)
entry sp, 16 entry sp, 16
___flush_dcache_range a2 a3 a4 ___flush_dcache_range a2 a3 a4
...@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range) ...@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)
retw retw
ENDPROC(__flush_dcache_range)
/* /*
* void _invalidate_dcache_range(ulong start, ulong size) * void _invalidate_dcache_range(ulong start, ulong size)
*/ */
ENTRY(__invalidate_dcache_range) ENTRY(__invalidate_dcache_range)
entry sp, 16 entry sp, 16
___invalidate_dcache_range a2 a3 a4 ___invalidate_dcache_range a2 a3 a4
retw retw
ENDPROC(__invalidate_dcache_range)
/* /*
* void _invalidate_icache_all(void) * void _invalidate_icache_all(void)
*/ */
ENTRY(__invalidate_icache_all) ENTRY(__invalidate_icache_all)
entry sp, 16 entry sp, 16
___invalidate_icache_all a2 a3 ___invalidate_icache_all a2 a3
...@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all) ...@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)
retw retw
ENDPROC(__invalidate_icache_all)
/* /*
* void _flush_invalidate_dcache_all(void) * void _flush_invalidate_dcache_all(void)
*/ */
ENTRY(__flush_invalidate_dcache_all) ENTRY(__flush_invalidate_dcache_all)
entry sp, 16 entry sp, 16
___flush_invalidate_dcache_all a2 a3 ___flush_invalidate_dcache_all a2 a3
...@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all) ...@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)
retw retw
ENDPROC(__flush_invalidate_dcache_all)
/* /*
* void _invalidate_dcache_all(void) * void _invalidate_dcache_all(void)
*/ */
ENTRY(__invalidate_dcache_all) ENTRY(__invalidate_dcache_all)
entry sp, 16 entry sp, 16
___invalidate_dcache_all a2 a3 ___invalidate_dcache_all a2 a3
...@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all) ...@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)
retw retw
ENDPROC(__invalidate_dcache_all)
...@@ -37,7 +37,7 @@ void __init init_mmu(void) ...@@ -37,7 +37,7 @@ void __init init_mmu(void)
/* Set rasid register to a known value. */ /* Set rasid register to a known value. */
set_rasid_register(ASID_USER_FIRST); set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
/* Set PTEVADDR special register to the start of the page /* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not * table, which is in kernel mappable space (ie. not
......
...@@ -63,7 +63,7 @@ void flush_tlb_all (void) ...@@ -63,7 +63,7 @@ void flush_tlb_all (void)
void flush_tlb_mm(struct mm_struct *mm) void flush_tlb_mm(struct mm_struct *mm)
{ {
if (mm == current->active_mm) { if (mm == current->active_mm) {
int flags; unsigned long flags;
local_save_flags(flags); local_save_flags(flags);
__get_new_mmu_context(mm); __get_new_mmu_context(mm);
__load_mmu_context(mm); __load_mmu_context(mm);
...@@ -82,7 +82,7 @@ void flush_tlb_mm(struct mm_struct *mm) ...@@ -82,7 +82,7 @@ void flush_tlb_mm(struct mm_struct *mm)
#endif #endif
void flush_tlb_range (struct vm_area_struct *vma, void flush_tlb_range (struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
unsigned long flags; unsigned long flags;
...@@ -100,7 +100,7 @@ void flush_tlb_range (struct vm_area_struct *vma, ...@@ -100,7 +100,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
int oldpid = get_rasid_register(); int oldpid = get_rasid_register();
set_rasid_register (ASID_INSERT(mm->context)); set_rasid_register (ASID_INSERT(mm->context));
start &= PAGE_MASK; start &= PAGE_MASK;
if (vma->vm_flags & VM_EXEC) if (vma->vm_flags & VM_EXEC)
while(start < end) { while(start < end) {
invalidate_itlb_mapping(start); invalidate_itlb_mapping(start);
invalidate_dtlb_mapping(start); invalidate_dtlb_mapping(start);
...@@ -130,7 +130,7 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) ...@@ -130,7 +130,7 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
local_save_flags(flags); local_save_flags(flags);
oldpid = get_rasid_register(); oldpid = get_rasid_register();
if (vma->vm_flags & VM_EXEC) if (vma->vm_flags & VM_EXEC)
invalidate_itlb_mapping(page); invalidate_itlb_mapping(page);
...@@ -140,4 +140,3 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) ...@@ -140,4 +140,3 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
local_irq_restore(flags); local_irq_restore(flags);
} }
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Tensilica Inc.
*/
#ifndef __ASM_XTENSA_ISS_SERIAL_H
#define __ASM_XTENSA_ISS_SERIAL_H
/* Have no meaning on ISS, but needed for 8250_early.c */
#define BASE_BAUD 0
#endif /* __ASM_XTENSA_ISS_SERIAL_H */
...@@ -74,13 +74,12 @@ static inline int __simc(int a, int b, int c, int d, int e, int f) ...@@ -74,13 +74,12 @@ static inline int __simc(int a, int b, int c, int d, int e, int f)
"mov %1, a3\n" "mov %1, a3\n"
: "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1) : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
: "r"(c1), "r"(d1), "r"(e1), "r"(f1) : "r"(c1), "r"(d1), "r"(e1), "r"(f1)
: ); : "memory");
return ret; return ret;
} }
static inline int simc_open(const char *file, int flags, int mode) static inline int simc_open(const char *file, int flags, int mode)
{ {
wmb();
return __simc(SYS_open, (int) file, flags, mode, 0, 0); return __simc(SYS_open, (int) file, flags, mode, 0, 0);
} }
...@@ -91,19 +90,16 @@ static inline int simc_close(int fd) ...@@ -91,19 +90,16 @@ static inline int simc_close(int fd)
static inline int simc_ioctl(int fd, int request, void *arg) static inline int simc_ioctl(int fd, int request, void *arg)
{ {
wmb();
return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0); return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
} }
static inline int simc_read(int fd, void *buf, size_t count) static inline int simc_read(int fd, void *buf, size_t count)
{ {
rmb();
return __simc(SYS_read, fd, (int) buf, count, 0, 0); return __simc(SYS_read, fd, (int) buf, count, 0, 0);
} }
static inline int simc_write(int fd, const void *buf, size_t count) static inline int simc_write(int fd, const void *buf, size_t count)
{ {
wmb();
return __simc(SYS_write, fd, (int) buf, count, 0, 0); return __simc(SYS_write, fd, (int) buf, count, 0, 0);
} }
...@@ -111,7 +107,6 @@ static inline int simc_poll(int fd) ...@@ -111,7 +107,6 @@ static inline int simc_poll(int fd)
{ {
struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
wmb();
return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv, return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,
0, 0); 0, 0);
} }
......
# Makefile for the Tensilica xtavnet Emulation Board
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definitions are in the main makefile...
obj-y = setup.o lcd.o
/*
* arch/xtensa/platform/xtavnet/include/platform/hardware.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Tensilica Inc.
*/
/*
* This file contains the hardware configuration of the XTAVNET boards.
*/
#ifndef __XTENSA_XTAVNET_HARDWARE_H
#define __XTENSA_XTAVNET_HARDWARE_H
/* By default NO_IRQ is defined to 0 in Linux, but we use the
interrupt 0 for UART... */
#define NO_IRQ -1
/* Memory configuration. */
#define PLATFORM_DEFAULT_MEM_START 0x00000000
#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000
/* Interrupt configuration. */
#define PLATFORM_NR_IRQS 10
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_ARCH_HAS_SMP
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM
#else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM
#endif
/*
* Device addresses and parameters.
*/
/* UART */
#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
/* LCD instruction and data addresses. */
#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
/* Misc. */
#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
/* Clock frequency in Hz (read-only): */
#define XTFPGA_CLKFRQ_VADDR (XTFPGA_FPGAREGS_VADDR + 0x04)
/* Setting of 8 DIP switches: */
#define DIP_SWITCHES_VADDR (XTFPGA_FPGAREGS_VADDR + 0x0C)
/* Software reset (write 0xdead): */
#define XTFPGA_SWRST_VADDR (XTFPGA_FPGAREGS_VADDR + 0x10)
/* OpenCores Ethernet controller: */
/* regs + RX/TX descriptors */
#define OETH_REGS_PADDR (XCHAL_KIO_PADDR + 0x0D030000)
#define OETH_REGS_SIZE 0x1000
#define OETH_SRAMBUFF_PADDR (XCHAL_KIO_PADDR + 0x0D800000)
/* 5*rx buffs + 5*tx buffs */
#define OETH_SRAMBUFF_SIZE (5 * 0x600 + 5 * 0x600)
#endif /* __XTENSA_XTAVNET_HARDWARE_H */
/*
* arch/xtensa/platform/xtavnet/include/platform/lcd.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2006 Tensilica Inc.
*/
#ifndef __XTENSA_XTAVNET_LCD_H
#define __XTENSA_XTAVNET_LCD_H
/* Display string STR at position POS on the LCD. */
void lcd_disp_at_pos(char *str, unsigned char pos);
/* Shift the contents of the LCD display left or right. */
void lcd_shiftleft(void);
void lcd_shiftright(void);
#endif
/*
* arch/xtensa/platform/xtavnet/include/platform/serial.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2006 Tensilica Inc.
*/
#ifndef __ASM_XTENSA_XTAVNET_SERIAL_H
#define __ASM_XTENSA_XTAVNET_SERIAL_H
#include <platform/hardware.h>
#define BASE_BAUD (*(long *)XTFPGA_CLKFRQ_VADDR / 16)
#endif /* __ASM_XTENSA_XTAVNET_SERIAL_H */
/*
* Driver for the LCD display on the Tensilica LX60 Board.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2006 Tensilica Inc.
*/
/*
*
* FIXME: this code is from the examples from the LX60 user guide.
*
* The lcd_pause function does busy waiting, which is probably not
* great. Maybe the code could be changed to use kernel timers, or
* change the hardware to not need to wait.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <platform/hardware.h>
#include <platform/lcd.h>
#include <linux/delay.h>
/* LCD controller command bytes (HD44780-style controller, per the LX60
 * user-guide examples this driver derives from -- see FIXME above). */
/* Busy-wait iteration count (NOTE(review): not referenced in this file). */
#define LCD_PAUSE_ITERATIONS 4000
/* Clear entire display. */
#define LCD_CLEAR 0x1
/* Turn the display on. */
#define LCD_DISPLAY_ON 0xc

/* 8bit and 2 lines display */
#define LCD_DISPLAY_MODE8BIT 0x38
/* Set cursor position; the position is OR'ed into the command. */
#define LCD_DISPLAY_POS 0x80
/* Shift the whole display one position left/right. */
#define LCD_SHIFT_LEFT 0x18
#define LCD_SHIFT_RIGHT 0x1c
static int __init lcd_init(void)
{
	/*
	 * Power-on initialization of the board's character LCD.
	 * The MODE8BIT command is written three times with decreasing
	 * delays -- presumably the controller's documented wake-up
	 * handshake (TODO confirm against the LX60 user guide); do not
	 * reorder or merge these writes.
	 */
	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
	mdelay(5);
	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
	udelay(200);
	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
	udelay(50);
	*LCD_INSTR_ADDR = LCD_DISPLAY_ON;
	udelay(50);
	*LCD_INSTR_ADDR = LCD_CLEAR;
	mdelay(10);
	/* Greeting stays on the display until something overwrites it. */
	lcd_disp_at_pos("XTENSA LINUX", 0);
	return 0;
}
/*
 * Display the NUL-terminated string @str starting at LCD position @pos.
 * Busy-waits between character writes (see FIXME at the top of the file).
 */
void lcd_disp_at_pos(char *str, unsigned char pos)
{
	const char *cp;

	*LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
	udelay(100);
	for (cp = str; *cp != 0; cp++) {
		*LCD_DATA_ADDR = *cp;
		udelay(200);
	}
}
/* Shift the whole LCD display contents one position to the left. */
void lcd_shiftleft(void)
{
	*LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
	udelay(50);
}
/* Shift the whole LCD display contents one position to the right. */
void lcd_shiftright(void)
{
	*LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
	udelay(50);
}
arch_initcall(lcd_init);
/*
*
* arch/xtensa/platform/xtavnet/setup.c
*
* ...
*
* Authors: Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com>
*
* Copyright 2001 - 2006 Tensilica Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/kdev_t.h>
#include <linux/types.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <asm/timex.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/bootparam.h>
#include <platform/lcd.h>
#include <platform/hardware.h>
/*
 * Halt hook: announce the state on the LCD, then park the CPU with
 * interrupts disabled.  Never returns.
 */
void platform_halt(void)
{
	lcd_disp_at_pos(" HALT ", 0);
	local_irq_disable();
	for (;;)
		cpu_relax();
}
/*
 * Power-off hook: the board has no software power control, so just show
 * the state on the LCD and spin with interrupts off.  Never returns.
 */
void platform_power_off(void)
{
	lcd_disp_at_pos("POWEROFF", 0);
	local_irq_disable();
	for (;;)
		cpu_relax();
}
void platform_restart(void)
{
	/* Flush and reset the mmu, simulate a processor reset, and
	 * jump to the reset vector. */
	/*
	 * Sequence (clobbers only a2, as declared in the clobber list):
	 *  - raise icountlevel and zero icount/lcount so no instruction-
	 *    count or zero-overhead-loop state survives the "reset";
	 *  - clear ibreakenable to drop hardware breakpoints;
	 *  - set PS to 0x1f (NOTE(review): presumably kernel mode with
	 *    all interrupt levels masked -- confirm against core config);
	 *  - isync, then jump to the reset vector.
	 */
	__asm__ __volatile__ ("movi a2, 15\n\t"
			"wsr a2, icountlevel\n\t"
			"movi a2, 0\n\t"
			"wsr a2, icount\n\t"
			"wsr a2, ibreakenable\n\t"
			"wsr a2, lcount\n\t"
			"movi a2, 0x1f\n\t"
			"wsr a2, ps\n\t"
			"isync\n\t"
			"jx %0\n\t"
			:
			: "a" (XCHAL_RESET_VECTOR_VADDR)
			: "a2"
			);
	/* control never gets here */
}
/* Early platform setup hook; nothing board-specific to do here. */
void __init platform_setup(char **cmdline)
{
}
#ifdef CONFIG_OF
/*
 * Ensure @node carries a non-zero "clock-frequency" property.  If it does
 * not, build a replacement property whose big-endian u32 payload is read
 * from the FPGA clock-frequency register and install it on the node.
 * Allocation failures are silently ignored (best effort).
 */
static void __init update_clock_frequency(struct device_node *node)
{
	u32 cur;
	struct property *prop;

	/* Nothing to do if a usable value is already present. */
	if (!of_property_read_u32(node, "clock-frequency", &cur) && cur != 0)
		return;

	/* Property header plus its u32 payload in one allocation. */
	prop = kzalloc(sizeof(*prop) + sizeof(u32), GFP_KERNEL);
	if (prop == NULL)
		return;

	prop->name = kstrdup("clock-frequency", GFP_KERNEL);
	if (prop->name == NULL) {
		kfree(prop);
		return;
	}
	prop->value = prop + 1;
	prop->length = sizeof(u32);
	*(u32 *)prop->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);

	prom_update_property(node, prop);
}
#define MAC_LEN 6
/*
 * Rewrite @node's "local-mac-address" property so that its final byte is
 * taken from the low six bits of the board DIP-switch register, leaving
 * the vendor prefix intact.  Nodes without a 6-byte property, and
 * allocation failures, are silently ignored (best effort).
 */
static void __init update_local_mac(struct device_node *node)
{
	int len;
	const u8 *cur;
	struct property *prop;

	cur = of_get_property(node, "local-mac-address", &len);
	if (cur == NULL || len != MAC_LEN)
		return;

	/* Property header plus the 6-byte payload in one allocation. */
	prop = kzalloc(sizeof(*prop) + MAC_LEN, GFP_KERNEL);
	if (prop == NULL)
		return;

	prop->name = kstrdup("local-mac-address", GFP_KERNEL);
	if (prop->name == NULL) {
		kfree(prop);
		return;
	}
	prop->value = prop + 1;
	prop->length = MAC_LEN;
	memcpy(prop->value, cur, MAC_LEN);
	((u8 *)prop->value)[5] = (*(u32 *)DIP_SWITCHES_VADDR) & 0x3f;

	prom_update_property(node, prop);
}
/*
 * Fix up device-tree nodes whose properties depend on the FPGA bitstream:
 * the UART clock rate and the Ethernet MAC address.  Always returns 0.
 */
static int __init machine_setup(void)
{
	struct device_node *serial;
	struct device_node *eth;

	for_each_compatible_node(serial, NULL, "ns16550a")
		update_clock_frequency(serial);

	eth = of_find_compatible_node(NULL, NULL, "opencores,ethoc");
	if (eth != NULL)
		update_local_mac(eth);
	return 0;
}
#endif
/* early initialization */
/* Early platform init hook (boot-parameter tags); nothing to do here. */
void __init platform_init(bp_tag_t *first)
{
}
/* Heartbeat. */
/* Periodic heartbeat hook; this board has no heartbeat indicator. */
void platform_heartbeat(void)
{
}
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
void platform_calibrate_ccount(void)
{
	/*
	 * Determine the CPU clock rate so ccount can be used for
	 * timekeeping.  Prefer the CPU node's "clock-frequency" DT
	 * property; fall back to reading the FPGA board register.
	 */
	long clk_freq = 0;
#ifdef CONFIG_OF
	struct device_node *cpu =
		of_find_compatible_node(NULL, NULL, "xtensa,cpu");
	if (cpu) {
		u32 freq;
		/* Make the DT property reflect the FPGA register first. */
		update_clock_frequency(cpu);
		if (!of_property_read_u32(cpu, "clock-frequency", &freq))
			clk_freq = freq;
	}
#endif
	if (!clk_freq)
		clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR;

	ccount_per_jiffy = clk_freq / HZ;
	nsec_per_ccount = 1000000000UL / clk_freq;
}
#endif
#ifndef CONFIG_OF
#include <linux/serial_8250.h>
#include <linux/if.h>
#include <net/ethoc.h>
/*----------------------------------------------------------------------------
* Ethernet -- OpenCores Ethernet MAC (ethoc driver)
*/
static struct resource ethoc_res[] __initdata = {
[0] = { /* register space */
.start = OETH_REGS_PADDR,
.end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = { /* buffer space */
.start = OETH_SRAMBUFF_PADDR,
.end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[2] = { /* IRQ number */
.start = OETH_IRQ,
.end = OETH_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct ethoc_platform_data ethoc_pdata __initdata = {
/*
* The MAC address for these boards is 00:50:c2:13:6f:xx.
* The last byte (here as zero) is read from the DIP switches on the
* board.
*/
.hwaddr = { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0 },
.phy_id = -1,
};
static struct platform_device ethoc_device __initdata = {
.name = "ethoc",
.id = -1,
.num_resources = ARRAY_SIZE(ethoc_res),
.resource = ethoc_res,
.dev = {
.platform_data = &ethoc_pdata,
},
};
/*----------------------------------------------------------------------------
* UART
*/
static struct resource serial_resource __initdata = {
.start = DUART16552_PADDR,
.end = DUART16552_PADDR + 0x1f,
.flags = IORESOURCE_MEM,
};
static struct plat_serial8250_port serial_platform_data[] __initdata = {
[0] = {
.mapbase = DUART16552_PADDR,
.irq = DUART16552_INTNUM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM32,
.regshift = 2,
.uartclk = 0, /* set in xtavnet_init() */
},
{ },
};
static struct platform_device xtavnet_uart __initdata = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = serial_platform_data,
},
.num_resources = 1,
.resource = &serial_resource,
};
/* platform devices */
static struct platform_device *platform_devices[] __initdata = {
&ethoc_device,
&xtavnet_uart,
};
/*
 * Non-CONFIG_OF board bring-up: fill in the bitstream-dependent fields
 * (MAC address last byte, UART clock) and register the platform devices.
 */
static int __init xtavnet_init(void)
{
	/*
	 * Ethernet MAC address: only six DIP switches select the final
	 * byte, so mask to the low six bits -- keeps this path consistent
	 * with update_local_mac() in the CONFIG_OF path.
	 */
	ethoc_pdata.hwaddr[5] = *(u32 *)DIP_SWITCHES_VADDR & 0x3f;

	/* Clock rate varies among FPGA bitstreams; board specific FPGA register
	 * reports the actual clock rate.
	 */
	serial_platform_data[0].uartclk = *(long *)XTFPGA_CLKFRQ_VADDR;

	/* register platform devices */
	platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));

	/* ETHOC driver is a bit quiet; at least display Ethernet MAC, so user
	 * knows whether they set it correctly on the DIP switches.
	 */
	pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
	return 0;
}
/*
* Register to be done during do_initcalls().
*/
arch_initcall(xtavnet_init);
#endif /* CONFIG_OF */
...@@ -164,7 +164,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc) ...@@ -164,7 +164,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
int cirq; int cirq;
chip->irq_mask(&desc->irq_data); chip->irq_mask(&desc->irq_data);
chip->irq_ack(&desc->irq_data)); chip->irq_ack(&desc->irq_data);
pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask; pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
cirq = IRQ_BASE - 1; cirq = IRQ_BASE - 1;
while (pending) { while (pending) {
...@@ -173,7 +173,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc) ...@@ -173,7 +173,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
pending >>= n; pending >>= n;
generic_handle_irq(cirq); generic_handle_irq(cirq);
} }
chip->irq_unmask(&desc->irq_data)); chip->irq_unmask(&desc->irq_data);
} }
extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS]; extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment