Commit 4d7048f5 authored by Linus Torvalds

Merge tag 'xtensa-20191201' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - add support for execute in place (XIP) kernels

 - improvements in inline assembly: use named arguments and "m"
   constraints where possible (a brief before/after sketch follows this list)

 - improve stack dumping

 - clean up system_call code and syscall tracing

 - various small fixes and cleanups
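
   To illustrate the inline-assembly conversion (a minimal sketch, not code
   from the tree; val and p are illustrative variables): a positional,
   register-addressed operand such as

        __asm__ __volatile__("l32i %0, %1, 0" : "=a" (val) : "a" (p));

   becomes a named operand with an "m" constraint, which drops the fragile
   %0/%1 numbering and lets the compiler fold the offset into the
   addressing mode:

        __asm__ __volatile__("l32i %[val], %[mem]"
                             : [val] "=a" (val)
                             : [mem] "m" (*p));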

* tag 'xtensa-20191201' of git://github.com/jcmvbkbc/linux-xtensa: (30 commits)
  xtensa: clean up system_call/xtensa_rt_sigreturn interaction
  xtensa: fix system_call interaction with ptrace
  xtensa: rearrange syscall tracing
  xtensa: fix syscall_set_return_value
  xtensa: drop unneeded headers from coprocessor.S
  xtensa: entry: Remove unneeded need_resched() loop
  xtensa: use MEMBLOCK_ALLOC_ANYWHERE for KASAN shadow map
  xtensa: fix TLB sanity checker
  xtensa: get rid of __ARCH_USE_5LEVEL_HACK
  xtensa: mm: fix PMD folding implementation
  xtensa: make stack dump size configurable
  xtensa: improve stack dumping
  xtensa: use "m" constraint instead of "r" in futex.h assembly
  xtensa: use "m" constraint instead of "a" in cmpxchg.h assembly
  xtensa: use named assembly arguments in cmpxchg.h
  xtensa: use "m" constraint instead of "a" in atomic.h assembly
  xtensa: use named assembly arguments in atomic.h
  xtensa: use "m" constraint instead of "a" in bitops.h assembly
  xtensa: use named assembly arguments in bitops.h
  xtensa: use macros to generate *_bit and test_and_*_bit functions
  ...
parents 043cf468 9d9043f6
@@ -30,5 +30,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
| xtensa: | TODO |
| xtensa: | ok |
-----------------------
@@ -31,3 +31,10 @@ config S32C1I_SELFTEST
It is easy to get the hardware configuration wrong; this test should catch it early.
Say 'N' on stable hardware.
config PRINT_STACK_DEPTH
int "Stack depth to print" if DEBUG_KERNEL
default 64
help
This option allows you to set the stack depth that the kernel
prints in stack traces.
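
For reference, the new symbol is consumed by the rewritten stack dumper in
arch/xtensa/kernel/traps.c, in a hunk further down in this diff:

        static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;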
@@ -87,7 +87,7 @@ drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
boot := arch/xtensa/boot
all Image zImage uImage: vmlinux
all Image zImage uImage xipImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
archheaders:
@@ -97,4 +97,5 @@ define archhelp
@echo '* Image - Kernel ELF image with reset vector'
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
@echo '* uImage - U-Boot wrapped image'
@echo ' xipImage - XIP image'
endef
@@ -29,6 +29,7 @@ all: $(boot-y)
Image: boot-elf
zImage: boot-redboot
uImage: $(obj)/uImage
xipImage: $(obj)/xipImage
boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
@@ -50,3 +51,7 @@ UIMAGE_COMPRESSION = gzip
$(obj)/uImage: vmlinux.bin.gz FORCE
$(call if_changed,uimage)
$(Q)$(kecho) ' Kernel: $@ is ready'
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
$(Q)$(kecho) ' Kernel: $@ is ready'
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_MEMCG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_DEBUG=y
CONFIG_NAMESPACES=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_XTENSA_VARIANT_DC233C=y
CONFIG_XTENSA_UNALIGNED_USER=y
CONFIG_XIP_KERNEL=y
CONFIG_XIP_DATA_ADDR=0xd0000000
CONFIG_KERNEL_VIRTUAL_ADDRESS=0xe6000000
CONFIG_KERNEL_LOAD_ADDRESS=0xf6000000
CONFIG_XTENSA_KSEG_512M=y
CONFIG_HIGHMEM=y
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_PARSE_BOOTPARAM is not set
CONFIG_OPROFILE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_MARVELL_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_DEVKMEM=y
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=y
# CONFIG_VGA_CONSOLE is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_FANOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_SUNRPC_DEBUG=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_STACKTRACE=y
CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
# CONFIG_S32C1I_SELFTEST is not set
@@ -11,6 +11,7 @@ generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
@@ -30,6 +31,7 @@ generic-y += qspinlock.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
@@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
"1: l32ex %1, %3\n" \
" " #op " %0, %1, %2\n" \
" s32ex %0, %3\n" \
" getex %0\n" \
" beqz %0, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32ex %[tmp], %[addr]\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32ex %[result], %[addr]\n" \
" getex %[result]\n" \
" beqz %[result], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp) \
: [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
} \
@@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
"1: l32ex %1, %3\n" \
" " #op " %0, %1, %2\n" \
" s32ex %0, %3\n" \
" getex %0\n" \
" beqz %0, 1b\n" \
" " #op " %0, %1, %2\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32ex %[tmp], %[addr]\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32ex %[result], %[addr]\n" \
" getex %[result]\n" \
" beqz %[result], 1b\n" \
" " #op " %[result], %[tmp], %[i]\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp) \
: [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
"1: l32ex %1, %3\n" \
" " #op " %0, %1, %2\n" \
" s32ex %0, %3\n" \
" getex %0\n" \
" beqz %0, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32ex %[tmp], %[addr]\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32ex %[result], %[addr]\n" \
" getex %[result]\n" \
" beqz %[result], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp) \
: [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32i %[tmp], %[mem]\n" \
" wsr %[tmp], scompare1\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %[result], %[mem]\n" \
" bne %[result], %[tmp], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \
); \
} \
@@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
" " #op " %0, %0, %2\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32i %[tmp], %[mem]\n" \
" wsr %[tmp], scompare1\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %[result], %[mem]\n" \
" bne %[result], %[tmp], 1b\n" \
" " #op " %[result], %[result], %[i]\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \
); \
\
@@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32i %[tmp], %[mem]\n" \
" wsr %[tmp], scompare1\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %[result], %[mem]\n" \
" bne %[result], %[tmp], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \
); \
\
@@ -184,14 +187,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
" rsil a15, "__stringify(TOPLEVEL)"\n"\
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
" rsil a15, "__stringify(TOPLEVEL)"\n" \
" l32i %[result], %[mem]\n" \
" " #op " %[result], %[result], %[i]\n" \
" s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval) \
: "a" (i), "a" (v) \
: [result] "=&a" (vval), [mem] "+m" (*v) \
: [i] "a" (i) \
: "a15", "memory" \
); \
} \
@@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
" l32i %[result], %[mem]\n" \
" " #op " %[result], %[result], %[i]\n" \
" s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval) \
: "a" (i), "a" (v) \
: [result] "=&a" (vval), [mem] "+m" (*v) \
: [i] "a" (i) \
: "a15", "memory" \
); \
\
@@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %3, 0\n" \
" " #op " %1, %0, %2\n" \
" s32i %1, %3, 0\n" \
" l32i %[result], %[mem]\n" \
" " #op " %[tmp], %[result], %[i]\n" \
" s32i %[tmp], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval), "=&a" (tmp) \
: "a" (i), "a" (v) \
: [result] "=&a" (vval), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "a15", "memory" \
); \
\
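
To make the macro-generated code concrete, this is roughly what the S32C1I
variant of ATOMIC_OP(add) from the hunks above expands to after the
conversion (a sketch):

        static inline void atomic_add(int i, atomic_t *v)
        {
                unsigned long tmp;
                int result;

                __asm__ __volatile__(
                        "1:     l32i    %[tmp], %[mem]\n"       /* load old value */
                        "       wsr     %[tmp], scompare1\n"    /* arm the compare */
                        "       add     %[result], %[tmp], %[i]\n"
                        "       s32c1i  %[result], %[mem]\n"    /* store iff unchanged */
                        "       bne     %[result], %[tmp], 1b\n" /* lost a race: retry */
                        : [result] "=&a" (result), [tmp] "=&a" (tmp), [mem] "+m" (*v)
                        : [i] "a" (i)
                        : "memory");
        }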
@@ -98,247 +98,112 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %0, %2\n"
" or %0, %0, %1\n"
" s32ex %0, %2\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (mask), "a" (p)
: "memory");
}
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %0, %2\n"
" and %0, %0, %1\n"
" s32ex %0, %2\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (~mask), "a" (p)
: "memory");
}
static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %0, %2\n"
" xor %0, %0, %1\n"
" s32ex %0, %2\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (mask), "a" (p)
: "memory");
}
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" or %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return value & mask;
}
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" and %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return value & mask;
}
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" xor %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return value & mask;
#define BIT_OP(op, insn, inv) \
static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32ex %[tmp], %[addr]\n" \
" "insn" %[tmp], %[tmp], %[mask]\n" \
" s32ex %[tmp], %[addr]\n" \
" getex %[tmp]\n" \
" beqz %[tmp], 1b\n" \
: [tmp] "=&a" (tmp) \
: [mask] "a" (inv mask), [addr] "a" (p) \
: "memory"); \
}
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32ex %[value], %[addr]\n" \
" "insn" %[tmp], %[value], %[mask]\n" \
" s32ex %[tmp], %[addr]\n" \
" getex %[tmp]\n" \
" beqz %[tmp], 1b\n" \
: [tmp] "=&a" (tmp), [value] "=&a" (value) \
: [mask] "a" (inv mask), [addr] "a" (p) \
: "memory"); \
\
return value & mask; \
}
#elif XCHAL_HAVE_S32C1I
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
#define BIT_OP(op, insn, inv) \
static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32i %[value], %[mem]\n" \
" wsr %[value], scompare1\n" \
" "insn" %[tmp], %[value], %[mask]\n" \
" s32c1i %[tmp], %[mem]\n" \
" bne %[tmp], %[value], 1b\n" \
: [tmp] "=&a" (tmp), [value] "=&a" (value), \
[mem] "+m" (*p) \
: [mask] "a" (inv mask) \
: "memory"); \
}
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32i %[value], %[mem]\n" \
" wsr %[value], scompare1\n" \
" "insn" %[tmp], %[value], %[mask]\n" \
" s32c1i %[tmp], %[mem]\n" \
" bne %[tmp], %[value], 1b\n" \
: [tmp] "=&a" (tmp), [value] "=&a" (value), \
[mem] "+m" (*p) \
: [mask] "a" (inv mask) \
: "memory"); \
\
return tmp & mask; \
}
static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
#else
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
#define BIT_OP(op, insn, inv)
#define TEST_AND_BIT_OP(op, insn, inv)
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return tmp & mask;
}
#include <asm-generic/bitops/atomic.h>
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
#endif /* XCHAL_HAVE_S32C1I */
#else
#define BIT_OPS(op, insn, inv) \
BIT_OP(op, insn, inv) \
TEST_AND_BIT_OP(op, insn, inv)
#include <asm-generic/bitops/atomic.h>
BIT_OPS(set, "or", )
BIT_OPS(clear, "and", ~)
BIT_OPS(change, "xor", )
#endif /* XCHAL_HAVE_S32C1I */
#undef BIT_OPS
#undef BIT_OP
#undef TEST_AND_BIT_OP
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
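
Because inv is pasted directly in front of mask in the operand list,
BIT_OPS(clear, "and", ~) generates a clear_bit() whose mask operand is
already inverted; roughly (S32C1I variant, a sketch):

        static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
        {
                unsigned long tmp, value;
                unsigned long mask = 1UL << (bit & 31);

                p += bit >> 5;

                __asm__ __volatile__(
                        "1:     l32i    %[value], %[mem]\n"
                        "       wsr     %[value], scompare1\n"
                        "       and     %[tmp], %[value], %[mask]\n"
                        "       s32c1i  %[tmp], %[mem]\n"
                        "       bne     %[tmp], %[value], 1b\n"
                        : [tmp] "=&a" (tmp), [value] "=&a" (value), [mem] "+m" (*p)
                        : [mask] "a" (~mask)            /* "inv mask" with inv = ~ */
                        : "memory");
        }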
@@ -31,4 +31,10 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/*
* R/O after init is actually writable, it cannot go to .rodata
* according to vmlinux linker script.
*/
#define __ro_after_init __read_mostly
#endif /* _XTENSA_CACHE_H */
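
Callers are unaffected: a variable tagged this way (hypothetical example
below) is written only from __init code and treated as read-only afterwards;
on xtensa the attribute simply aliases __read_mostly because, as the comment
above notes, the linker script cannot place such data in .rodata.

        static unsigned long boot_param __ro_after_init;   /* set once during boot */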
@@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new)
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32ex %0, %3\n"
" bne %0, %4, 2f\n"
" mov %1, %2\n"
" s32ex %1, %3\n"
" getex %1\n"
" beqz %1, 1b\n"
"1: l32ex %[result], %[addr]\n"
" bne %[result], %[cmp], 2f\n"
" mov %[tmp], %[new]\n"
" s32ex %[tmp], %[addr]\n"
" getex %[tmp]\n"
" beqz %[tmp], 1b\n"
"2:\n"
: "=&a" (result), "=&a" (tmp)
: "a" (new), "a" (p), "a" (old)
: [result] "=&a" (result), [tmp] "=&a" (tmp)
: [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
: "memory"
);
return result;
#elif XCHAL_HAVE_S32C1I
__asm__ __volatile__(
" wsr %2, scompare1\n"
" s32c1i %0, %1, 0\n"
: "+a" (new)
: "a" (p), "a" (old)
" wsr %[cmp], scompare1\n"
" s32c1i %[new], %[mem]\n"
: [new] "+a" (new), [mem] "+m" (*p)
: [cmp] "a" (old)
: "memory"
);
@@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new)
#else
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
" l32i %[old], %[mem]\n"
" bne %[old], %[cmp], 1f\n"
" s32i %[new], %[mem]\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
: [old] "=&a" (old), [mem] "+m" (*p)
: [cmp] "a" (old), [new] "r" (new)
: "a15", "memory");
return old;
#endif
@@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32ex %0, %3\n"
" mov %1, %2\n"
" s32ex %1, %3\n"
" getex %1\n"
" beqz %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (val), "a" (m)
"1: l32ex %[result], %[addr]\n"
" mov %[tmp], %[val]\n"
" s32ex %[tmp], %[addr]\n"
" getex %[tmp]\n"
" beqz %[tmp], 1b\n"
: [result] "=&a" (result), [tmp] "=&a" (tmp)
: [val] "a" (val), [addr] "a" (m)
: "memory"
);
@@ -143,13 +143,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#elif XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" mov %0, %3\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (m), "a" (val)
"1: l32i %[tmp], %[mem]\n"
" mov %[result], %[val]\n"
" wsr %[tmp], scompare1\n"
" s32c1i %[result], %[mem]\n"
" bne %[result], %[tmp], 1b\n"
: [result] "=&a" (result), [tmp] "=&a" (tmp),
[mem] "+m" (*m)
: [val] "a" (val)
: "memory"
);
return result;
@@ -157,12 +158,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" l32i %[tmp], %[mem]\n"
" s32i %[val], %[mem]\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (tmp)
: "a" (m), "a" (val)
: [tmp] "=&a" (tmp), [mem] "+m" (*m)
: [val] "a" (val)
: "a15", "memory");
return tmp;
#endif
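
Caller-side, these primitives back the generic cmpxchg()/xchg() macros.
A typical retry loop looks like this (a sketch; the helper name is
hypothetical, and cmpxchg() returns the value actually found in memory):

        static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
        {
                int c = atomic_read(v);

                while (c != u) {
                        int old = cmpxchg(&v->counter, c, c + a);

                        if (old == c)
                                return 1;       /* our update won the race */
                        c = old;                /* lost: retry with fresh value */
                }
                return 0;                       /* hit the forbidden value u */
        }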
@@ -78,8 +78,10 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
(vaddr) \
)
pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
(vaddr)), \
(vaddr)), \
(vaddr)), \
(vaddr))
#endif
@@ -43,10 +43,10 @@
#elif XCHAL_HAVE_S32C1I
#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
__asm__ __volatile( \
"1: l32i %[oldval], %[addr], 0\n" \
"1: l32i %[oldval], %[mem]\n" \
insn "\n" \
" wsr %[oldval], scompare1\n" \
"2: s32c1i %[newval], %[addr], 0\n" \
"2: s32c1i %[newval], %[mem]\n" \
" bne %[newval], %[oldval], 1b\n" \
" movi %[newval], 0\n" \
"3:\n" \
@@ -60,9 +60,9 @@
" .section __ex_table,\"a\"\n" \
" .long 1b, 5b, 2b, 5b\n" \
" .previous\n" \
: [oldval] "=&r" (old), [newval] "=&r" (ret) \
: [addr] "r" (uaddr), [oparg] "r" (arg), \
[fault] "I" (-EFAULT) \
: [oldval] "=&r" (old), [newval] "=&r" (ret), \
[mem] "+m" (*(uaddr)) \
: [oparg] "r" (arg), [fault] "I" (-EFAULT) \
: "memory")
#endif
/*
* include/asm-xtensa/hw_irq.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HW_IRQ_H
#define _XTENSA_HW_IRQ_H
#endif
@@ -23,6 +23,7 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/vectors.h>
@@ -183,7 +184,7 @@
#endif
#if XCHAL_HAVE_MPU
.data
__REFCONST
.align 4
.Lattribute_table:
.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
@@ -11,6 +11,7 @@
#ifndef _XTENSA_KMEM_LAYOUT_H
#define _XTENSA_KMEM_LAYOUT_H
#include <asm/core.h>
#include <asm/types.h>
#ifdef CONFIG_MMU
@@ -65,6 +66,34 @@
#endif
/* KIO definition */
#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#else
#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
#endif
#define XCHAL_KIO_SIZE 0x10000000
#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
{
return xtensa_kio_paddr;
}
#endif
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
/* KERNEL_STACK definition */
#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT 13
#else
@@ -169,7 +169,18 @@ static inline unsigned long ___pa(unsigned long va)
if (off >= XCHAL_KSEG_SIZE)
off -= XCHAL_KSEG_SIZE;
#ifndef CONFIG_XIP_KERNEL
return off + PHYS_OFFSET;
#else
if (off < XCHAL_KSEG_SIZE)
return off + PHYS_OFFSET;
off -= XCHAL_KSEG_SIZE;
if (off >= XCHAL_KIO_SIZE)
off -= XCHAL_KIO_SIZE;
return off + XCHAL_KIO_PADDR;
#endif
}
#define __pa(x) ___pa((unsigned long)(x))
#else
@@ -8,7 +8,6 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
@@ -371,9 +370,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
@@ -195,6 +195,7 @@ struct thread_struct {
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
do { \
unsigned long syscall = (regs)->syscall; \
memset((regs), 0, sizeof(*(regs))); \
(regs)->pc = (new_pc); \
(regs)->ps = USER_PS_VALUE; \
@@ -204,7 +205,7 @@ struct thread_struct {
(regs)->depc = 0; \
(regs)->windowbase = 0; \
(regs)->windowstart = 1; \
(regs)->syscall = NO_SYSCALL; \
(regs)->syscall = syscall; \
} while (0)
/* Forward declaration */
@@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
regs->areg[0] = (long) error ? error : val;
regs->areg[2] = (long) error ? error : val;
}
#define SYSCALL_MAX_ARGS 6
@@ -79,7 +79,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->areg[reg[i]] = args[i];
}
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
asmlinkage long xtensa_rt_sigreturn(void);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
@@ -132,13 +132,13 @@ do { \
#define __check_align_1 ""
#define __check_align_2 \
" _bbci.l %[addr], 0, 1f \n" \
" _bbci.l %[mem] * 0, 1f \n" \
" movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
" _bbsi.l %[addr], 0, 0f \n" \
" _bbci.l %[addr], 1, 1f \n" \
" _bbsi.l %[mem] * 0, 0f \n" \
" _bbci.l %[mem] * 0 + 1, 1f \n" \
"0: movi %[err], %[efault] \n" \
" _j 2f \n"
@@ -154,7 +154,7 @@ do { \
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %[x], %[addr], 0 \n" \
"1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@@ -167,8 +167,8 @@ __asm__ __volatile__( \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
:[err] "+r"(err_), [tmp] "=r"(cb) \
:[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
:[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
:[x] "r"(x_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
@@ -222,7 +222,7 @@ do { \
u32 __x = 0; \
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %[x], %[addr], 0 \n" \
"1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@@ -236,7 +236,7 @@ do { \
" .long 1b, 5b \n" \
" .previous" \
:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
:[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
:[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
(x_) = (__force __typeof__(*(addr_)))__x; \
} while (0)
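
With the operand written as [mem] "m" (*(addr_)), the compiler can fold a
constant offset into the l32i/s32i addressing mode instead of first
materializing the address in a register. A caller-side sketch (struct and
function are hypothetical):

        struct uarg { int a; int b; };

        static int read_b(struct uarg __user *u)
        {
                int v;

                /* may compile down to a single l32i with offset 4 */
                if (get_user(v, &u->b))
                        return -EFAULT;
                return v;
        }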
/*
* include/asm-xtensa/user.h
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_USER_H
#define _XTENSA_USER_H
/* This file usually defines a 'struct user' structure. However, it is only
 * used for a.out files, which are not supported on Xtensa.
*/
#endif /* _XTENSA_USER_H */
@@ -21,50 +21,18 @@
#include <asm/core.h>
#include <asm/kmem_layout.h>
#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS
#else
#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
#endif
#define XCHAL_KIO_SIZE 0x10000000
#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
{
return xtensa_kio_paddr;
}
#endif
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
#if defined(CONFIG_MMU)
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/* Image Virtual Start Address */
#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
CONFIG_KERNEL_LOAD_ADDRESS - \
#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
XCHAL_KSEG_CACHED_VADDR - \
XCHAL_KSEG_PADDR)
#endif
#else
#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
/* Location of the start of the kernel text, _start */
#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif /* CONFIG_MMU */
#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
#ifdef CONFIG_VECTORS_OFFSET
#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
@@ -15,17 +15,9 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <asm/regs.h>
#if XTENSA_HAVE_COPROCESSORS
@@ -529,7 +529,7 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f
call4 preempt_schedule_irq
j 1b
j 4f
#endif
#if XTENSA_FAKE_NMI
@@ -1876,8 +1876,7 @@ ENDPROC(fast_store_prohibited)
ENTRY(system_call)
/* reserve 4 bytes on stack for function parameter */
abi_entry(4)
abi_entry_default
/* regs->syscall = regs->areg[2] */
@@ -1892,11 +1891,10 @@ ENTRY(system_call)
mov a6, a2
call4 do_syscall_trace_enter
beqz a6, .Lsyscall_exit
l32i a7, a2, PT_SYSCALL
1:
s32i a7, a1, 4
/* syscall = sys_call_table[syscall_nr] */
movi a4, sys_call_table
@@ -1906,8 +1904,6 @@ ENTRY(system_call)
addx4 a4, a7, a4
l32i a4, a4, 0
movi a5, sys_ni_syscall;
beq a4, a5, 1f
/* Load args: arg0 - arg5 are passed via regs. */
@@ -1918,25 +1914,19 @@ ENTRY(system_call)
l32i a10, a2, PT_AREG8
l32i a11, a2, PT_AREG9
/* Pass one additional argument to the syscall: pt_regs (on stack) */
s32i a2, a1, 0
callx4 a4
1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2
bnez a3, 1f
abi_ret(4)
.Lsyscall_exit:
abi_ret_default
1:
l32i a4, a1, 4
l32i a3, a2, PT_SYSCALL
s32i a4, a2, PT_SYSCALL
mov a6, a2
call4 do_syscall_trace_leave
s32i a3, a2, PT_SYSCALL
abi_ret(4)
abi_ret_default
ENDPROC(system_call)
@@ -260,6 +260,13 @@ ENTRY(_startup)
___invalidate_icache_all a2 a3
isync
#ifdef CONFIG_XIP_KERNEL
/* Setup bootstrap CPU stack in XIP kernel */
movi a1, start_info
l32i a1, a1, 0
#endif
movi a6, 0
xsr a6, excsave1
@@ -355,7 +362,7 @@ ENDPROC(cpu_restart)
* DATA section
*/
.section ".data.init.refok"
__REFDATA
.align 4
ENTRY(start_info)
.long init_thread_union + KERNEL_STACK_SIZE
@@ -264,6 +264,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
childregs->syscall = regs->syscall;
/* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
childregs->threadptr = childregs->areg[5];
@@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
void do_syscall_trace_enter(struct pt_regs *regs)
void do_syscall_trace_leave(struct pt_regs *regs);
int do_syscall_trace_enter(struct pt_regs *regs)
{
if (regs->syscall == NO_SYSCALL)
regs->areg[2] = -ENOSYS;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
tracehook_report_syscall_entry(regs)) {
regs->areg[2] = -ENOSYS;
regs->syscall = NO_SYSCALL;
return 0;
}
if (regs->syscall == NO_SYSCALL) {
do_syscall_trace_leave(regs);
return 0;
}
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
return 1;
}
void do_syscall_trace_leave(struct pt_regs *regs)
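
In C terms, the contract the entry code now relies on is roughly the
following (a sketch; the real caller is assembly in entry.S and the wrapper
name is illustrative):

        static void syscall_entry_sketch(struct pt_regs *regs)
        {
                if (!do_syscall_trace_enter(regs))
                        return;         /* skip dispatch: areg[2] already
                                           holds -ENOSYS or the traced value */

                /* otherwise dispatch sys_call_table[regs->syscall], store the
                 * return value in regs->areg[2], then do_syscall_trace_leave() */
        }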
@@ -308,6 +308,10 @@ extern char _Level6InterruptVector_text_end;
extern char _SecondaryResetVector_text_start;
extern char _SecondaryResetVector_text_end;
#endif
#ifdef CONFIG_XIP_KERNEL
extern char _xip_start[];
extern char _xip_end[];
#endif
static inline int __init_memblock mem_reserve(unsigned long start,
unsigned long end)
@@ -339,6 +343,9 @@ void __init setup_arch(char **cmdline_p)
#endif
mem_reserve(__pa(_stext), __pa(_end));
#ifdef CONFIG_XIP_KERNEL
mem_reserve(__pa(_xip_start), __pa(_xip_end));
#endif
#ifdef CONFIG_VECTORS_OFFSET
mem_reserve(__pa(&_WindowVectors_text_start),
@@ -236,9 +236,9 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
* Do a signal return; undo the signal stack.
*/
asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
long a4, long a5, struct pt_regs *regs)
asmlinkage long xtensa_rt_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t set;
int ret;
@@ -491,32 +491,27 @@ void show_trace(struct task_struct *task, unsigned long *sp)
pr_info("Call Trace:\n");
walk_stackframe(sp, show_trace_cb, NULL);
#ifndef CONFIG_KALLSYMS
pr_cont("\n");
#endif
}
static int kstack_depth_to_print = 24;
#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 32
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *task, unsigned long *sp)
{
int i = 0;
unsigned long *stack;
size_t len;
if (!sp)
sp = stack_pointer(task);
stack = sp;
pr_info("Stack:\n");
len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(sp))
break;
pr_cont(" %08lx", *sp++);
if (i % 8 == 7)
pr_cont("\n");
}
show_trace(task, stack);
pr_info("Stack:\n");
print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
sp, len, false);
show_trace(task, sp);
}
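
The min() bound keeps the dump inside the current kernel stack. A worked
example (assuming a non-KASAN configuration, so THREAD_SIZE is 8 KiB per
KERNEL_STACK_SHIFT = 13 earlier in this diff):

        /* sp = 0xd0001f80: (-(size_t)sp) & (8192 - 4) = 0x80, i.e. 128 bytes
         * remain up to the stack boundary at 0xd0002000, so only 32 words are
         * dumped even though the default kstack_depth_to_print of 64 entries
         * would otherwise allow 256 bytes. */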
DEFINE_SPINLOCK(die_lock);
@@ -119,7 +119,7 @@ SECTIONS
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
}
_etext = .;
PROVIDE (etext = .);
@@ -128,12 +128,11 @@ SECTIONS
RO_DATA(4096)
/* Relocation table */
.fixup : { *(.fixup) }
/* Data section */
#ifdef CONFIG_XIP_KERNEL
INIT_TEXT_SECTION(PAGE_SIZE)
#else
_sdata = .;
RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
@@ -147,6 +146,11 @@ SECTIONS
.init.data :
{
INIT_DATA
}
#endif
.init.rodata :
{
. = ALIGN(0x4);
__tagtable_begin = .;
*(.taglist)
@@ -187,11 +191,15 @@ SECTIONS
RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text);
#endif
#ifdef CONFIG_XIP_KERNEL
RELOCATE_ENTRY(_xip_data, .data);
RELOCATE_ENTRY(_xip_init_data, .init.data);
#else
#if defined(CONFIG_SMP)
RELOCATE_ENTRY(_SecondaryResetVector_text,
.SecondaryResetVector.text);
#endif
#endif
__boot_reloc_table_end = ABSOLUTE(.) ;
@@ -278,7 +286,7 @@ SECTIONS
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
#endif
#if defined(CONFIG_SMP)
#if !defined(CONFIG_XIP_KERNEL) && defined(CONFIG_SMP)
SECTION_VECTOR (_SecondaryResetVector_text,
.SecondaryResetVector.text,
@@ -291,12 +299,48 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
#ifndef CONFIG_XIP_KERNEL
__init_end = .;
BSS_SECTION(0, 8192, 0)
#endif
_end = .;
#ifdef CONFIG_XIP_KERNEL
. = CONFIG_XIP_DATA_ADDR;
_xip_start = .;
#undef LOAD_OFFSET
#define LOAD_OFFSET \
(CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy) + SIZEOF(.dummy) + 3) & ~ 3)
_xip_data_start = .;
_sdata = .;
RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
_xip_data_end = .;
/* Initialization data: */
STRUCT_ALIGN();
_xip_init_data_start = .;
__init_begin = .;
.init.data :
{
INIT_DATA
}
_xip_init_data_end = .;
__init_end = .;
BSS_SECTION(0, 8192, 0)
_xip_end = .;
#undef LOAD_OFFSET
#endif
DWARF_DEBUG
.xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }
@@ -197,6 +197,8 @@ void do_page_fault(struct pt_regs *regs)
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -211,8 +213,18 @@ void do_page_fault(struct pt_regs *regs)
pgd_val(*pgd) = pgd_val(*pgd_k);
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
goto bad_page_fault;
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud) || !pud_present(*pud_k))
goto bad_page_fault;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
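
Both p4d and pud are folded on xtensa, so the added levels compile down to
pointer casts and cost nothing at runtime; simplified from the asm-generic
pgtable-nop4d/pgtable-nopud helpers:

        static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
        {
                return (p4d_t *)pgd;    /* p4d is folded into pgd */
        }

        static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
        {
                return (pud_t *)p4d;    /* pud is folded into p4d */
        }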
@@ -193,8 +193,8 @@ void __init mem_init(void)
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
(unsigned long)_text, (unsigned long)_etext,
(unsigned long)(_etext - _text) >> 10,
(unsigned long)__start_rodata, (unsigned long)_sdata,
(unsigned long)(_sdata - __start_rodata) >> 10,
(unsigned long)__start_rodata, (unsigned long)__end_rodata,
(unsigned long)(__end_rodata - __start_rodata) >> 10,
(unsigned long)_sdata, (unsigned long)_edata,
(unsigned long)(_edata - _sdata) >> 10,
(unsigned long)__init_begin, (unsigned long)__init_end,
@@ -20,7 +20,9 @@ void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
......@@ -42,7 +44,9 @@ static void __init populate(void *start, void *end)
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
if (!pte)
@@ -56,7 +60,9 @@ static void __init populate(void *start, void *end)
for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
phys_addr_t phys =
memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
0,
MEMBLOCK_ALLOC_ANYWHERE);
if (!phys)
panic("Failed to allocate page table page\n");
@@ -22,7 +22,9 @@
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
unsigned long i;
@@ -169,6 +169,8 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
struct task_struct *task = get_current();
struct mm_struct *mm = task->mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -177,7 +179,13 @@
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
pmd = pmd_offset(pgd, vaddr);
p4d = p4d_offset(pgd, vaddr);
if (p4d_none_or_clear_bad(p4d))
return 0;
pud = pud_offset(p4d, vaddr);
if (pud_none_or_clear_bad(pud))
return 0;
pmd = pmd_offset(pud, vaddr);
if (pmd_none_or_clear_bad(pmd))
return 0;
pte = pte_offset_map(pmd, vaddr);
@@ -216,6 +224,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
unsigned tlbidx = w | (e << PAGE_SHIFT);
unsigned r0 = dtlb ?
read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
unsigned r1 = dtlb ?
read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
unsigned pte = get_pte_for_vaddr(vpn);
unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
@@ -231,8 +241,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
}
if (tlb_asid == mm_asid) {
unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
read_itlb_translation(tlbidx);
if ((pte ^ r1) & PAGE_MASK) {
pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
dtlb ? 'D' : 'I', w, e, r0, r1, pte);