Commit 17a05c8f authored by Linus Torvalds

Merge tag 'xtensa-20220523' of https://github.com/jcmvbkbc/linux-xtensa

Pull xtensa architecture updates from Max Filippov:

 - support coprocessors on SMP

 - support KCSAN

 - support handling protection faults in noMMU configurations

 - support using coprocessors in kernel mode

 - support hibernation

 - enable context tracking

 - enable HAVE_VIRT_CPU_ACCOUNTING_GEN

 - support division by 0 exception on cores without HW division option

 - clean up locking in the ISS network driver

 - clean up kernel entry assembly code

 - various minor fixes

* tag 'xtensa-20220523' of https://github.com/jcmvbkbc/linux-xtensa: (36 commits)
  xtensa: Return true/false (not 1/0) from bool function
  xtensa: improve call0 ABI probing
  xtensa: support artificial division by 0 exception
  xtensa: add trap handler for division by zero
  xtensa/simdisk: fix proc_read_simdisk()
  xtensa: no need to initialise statics to 0
  xtensa: clean up labels in the kernel entry assembly
  xtensa: don't leave invalid TLB entry in fast_store_prohibited
  xtensa: fix declaration of _SecondaryResetVector_text_*
  irqchip: irq-xtensa-mx: fix initial IRQ affinity
  xtensa: enable ARCH_HAS_DEBUG_VM_PGTABLE
  xtensa: add hibernation support
  xtensa: support coprocessors on SMP
  xtensa: get rid of stack frame in coprocessor_flush
  xtensa: merge SAVE_CP_REGS_TAB and LOAD_CP_REGS_TAB
  xtensa: add xtensa_xsr macro
  xtensa: handle coprocessor exceptions in kernel mode
  xtensa: use callx0 opcode in fast_coprocessor
  xtensa: clean up excsave1 initialization
  xtensa: clean up declarations in coprocessor.h
  ...
parents d6130604 dc60001e
...@@ -27,5 +27,5 @@ ...@@ -27,5 +27,5 @@
| sparc: | TODO | | sparc: | TODO |
| um: | TODO | | um: | TODO |
| x86: | ok | | x86: | ok |
| xtensa: | TODO | | xtensa: | ok |
----------------------- -----------------------
...@@ -27,5 +27,5 @@ ...@@ -27,5 +27,5 @@
| sparc: | ok | | sparc: | ok |
| um: | TODO | | um: | TODO |
| x86: | ok | | x86: | ok |
| xtensa: | TODO | | xtensa: | ok |
----------------------- -----------------------
...@@ -27,5 +27,5 @@ ...@@ -27,5 +27,5 @@
| sparc: | ok | | sparc: | ok |
| um: | TODO | | um: | TODO |
| x86: | ok | | x86: | ok |
| xtensa: | TODO | | xtensa: | ok |
----------------------- -----------------------
...@@ -4,6 +4,7 @@ config XTENSA ...@@ -4,6 +4,7 @@ config XTENSA
select ARCH_32BIT_OFF_T select ARCH_32BIT_OFF_T
select ARCH_HAS_BINFMT_FLAT if !MMU select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT if MMU select ARCH_HAS_DMA_PREP_COHERENT if MMU
select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
...@@ -29,8 +30,10 @@ config XTENSA ...@@ -29,8 +30,10 @@ config XTENSA
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_ARCH_KCSAN
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRACEHOOK
select HAVE_CONTEXT_TRACKING
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD select HAVE_EXIT_THREAD
...@@ -42,6 +45,7 @@ config XTENSA ...@@ -42,6 +45,7 @@ config XTENSA
select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_DOMAIN select IRQ_DOMAIN
select MODULES_USE_ELF_RELA select MODULES_USE_ELF_RELA
select PERF_USE_VMALLOC select PERF_USE_VMALLOC
...@@ -79,6 +83,7 @@ config STACKTRACE_SUPPORT ...@@ -79,6 +83,7 @@ config STACKTRACE_SUPPORT
config MMU config MMU
def_bool n def_bool n
select PFAULT
config HAVE_XTENSA_GPIO32 config HAVE_XTENSA_GPIO32
def_bool n def_bool n
...@@ -178,6 +183,16 @@ config XTENSA_FAKE_NMI ...@@ -178,6 +183,16 @@ config XTENSA_FAKE_NMI
If unsure, say N. If unsure, say N.
config PFAULT
bool "Handle protection faults" if EXPERT && !MMU
default y
help
Handle protection faults. MMU configurations must enable it.
noMMU configurations may disable it if the memory map in use never
generates protection faults or if such faults are always fatal.
If unsure, say Y.
config XTENSA_UNALIGNED_USER config XTENSA_UNALIGNED_USER
bool "Unaligned memory access in user space" bool "Unaligned memory access in user space"
help help
...@@ -773,6 +788,9 @@ endmenu ...@@ -773,6 +788,9 @@ endmenu
menu "Power management options" menu "Power management options"
config ARCH_HIBERNATION_POSSIBLE
def_bool y
source "kernel/power/Kconfig" source "kernel/power/Kconfig"
endmenu endmenu
...@@ -16,6 +16,7 @@ CFLAGS_REMOVE_inffast.o = -pg ...@@ -16,6 +16,7 @@ CFLAGS_REMOVE_inffast.o = -pg
endif endif
KASAN_SANITIZE := n KASAN_SANITIZE := n
KCSAN_SANITIZE := n
CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
......
...@@ -11,9 +11,15 @@ ...@@ -11,9 +11,15 @@
#include <asm/core.h> #include <asm/core.h>
#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) #define __mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() barrier() #define __rmb() barrier()
#define wmb() mb() #define __wmb() __mb()
#ifdef CONFIG_SMP
#define __smp_mb() __mb()
#define __smp_rmb() __rmb()
#define __smp_wmb() __wmb()
#endif
#if XCHAL_HAVE_S32C1I #if XCHAL_HAVE_S32C1I
#define __smp_mb__before_atomic() barrier() #define __smp_mb__before_atomic() barrier()
......
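The double-underscore renames above let the generic barrier header layer KCSAN instrumentation on top of the raw memory-wait primitives. A simplified sketch of the wrapper pattern in include/asm-generic/barrier.h (exact bodies vary by kernel version):

        #define mb()            do { kcsan_mb(); __mb(); } while (0)
        #define rmb()           do { kcsan_rmb(); __rmb(); } while (0)
        #define wmb()           do { kcsan_wmb(); __wmb(); } while (0)
        #define smp_mb()        do { kcsan_mb(); __smp_mb(); } while (0)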
...@@ -99,7 +99,7 @@ static inline unsigned long __fls(unsigned long word) ...@@ -99,7 +99,7 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE #if XCHAL_HAVE_EXCLUSIVE
#define BIT_OP(op, insn, inv) \ #define BIT_OP(op, insn, inv) \
static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
unsigned long mask = 1UL << (bit & 31); \ unsigned long mask = 1UL << (bit & 31); \
...@@ -119,7 +119,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ ...@@ -119,7 +119,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \ #define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \ static inline int \
test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \ { \
unsigned long tmp, value; \ unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \ unsigned long mask = 1UL << (bit & 31); \
...@@ -142,7 +142,7 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ ...@@ -142,7 +142,7 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
#elif XCHAL_HAVE_S32C1I #elif XCHAL_HAVE_S32C1I
#define BIT_OP(op, insn, inv) \ #define BIT_OP(op, insn, inv) \
static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \ { \
unsigned long tmp, value; \ unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \ unsigned long mask = 1UL << (bit & 31); \
...@@ -163,7 +163,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ ...@@ -163,7 +163,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \ #define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \ static inline int \
test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \ { \
unsigned long tmp, value; \ unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \ unsigned long mask = 1UL << (bit & 31); \
...@@ -205,6 +205,8 @@ BIT_OPS(change, "xor", ) ...@@ -205,6 +205,8 @@ BIT_OPS(change, "xor", )
#undef BIT_OP #undef BIT_OP
#undef TEST_AND_BIT_OP #undef TEST_AND_BIT_OP
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h> #include <asm-generic/bitops/ext2-atomic-setbit.h>
......
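Renaming the primitives to arch_*_bit() lets the newly included instrumented-atomic.h header supply the public entry points, inserting the KCSAN/KASAN checks before delegating to the arch implementation. A simplified sketch of what asm-generic/bitops/instrumented-atomic.h provides:

        static __always_inline void set_bit(long nr, volatile unsigned long *addr)
        {
                instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
                arch_set_bit(nr, addr);
        }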
...@@ -142,11 +142,12 @@ typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t ...@@ -142,11 +142,12 @@ typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN))); __attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX]; struct thread_info;
extern void coprocessor_flush(struct thread_info*, int); void coprocessor_flush(struct thread_info *ti, int cp_index);
void coprocessor_release_all(struct thread_info *ti);
extern void coprocessor_release_all(struct thread_info*); void coprocessor_flush_all(struct thread_info *ti);
extern void coprocessor_flush_all(struct thread_info*); void coprocessor_flush_release_all(struct thread_info *ti);
void local_coprocessors_flush_release_all(void);
#endif /* XTENSA_HAVE_COPROCESSORS */ #endif /* XTENSA_HAVE_COPROCESSORS */
......
...@@ -246,6 +246,13 @@ extern unsigned long __get_wchan(struct task_struct *p); ...@@ -246,6 +246,13 @@ extern unsigned long __get_wchan(struct task_struct *p);
v; \ v; \
}) })
#define xtensa_xsr(x, sr) \
({ \
unsigned int __v__ = (unsigned int)(x); \
__asm__ __volatile__ ("xsr %0, " __stringify(sr) : "+a"(__v__)); \
__v__; \
})
#if XCHAL_HAVE_EXTERN_REGS #if XCHAL_HAVE_EXTERN_REGS
static inline void set_er(unsigned long value, unsigned long addr) static inline void set_er(unsigned long value, unsigned long addr)
......
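The xtensa_xsr() macro just added wraps the XSR (exchange special register) instruction, which writes a new value to a special register and returns the old one in a single step. That turns a read-modify-write pair into one instruction, as used by the coprocessor rework later in this merge:

        old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
        /* ... flush coprocessors with the task's CPENABLE in effect ... */
        xtensa_set_sr(old_cpenable, cpenable);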
...@@ -29,7 +29,7 @@ extern char _Level5InterruptVector_text_end[]; ...@@ -29,7 +29,7 @@ extern char _Level5InterruptVector_text_end[];
extern char _Level6InterruptVector_text_start[]; extern char _Level6InterruptVector_text_start[];
extern char _Level6InterruptVector_text_end[]; extern char _Level6InterruptVector_text_end[];
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SECONDARY_RESET_VECTOR
extern char _SecondaryResetVector_text_start[]; extern char _SecondaryResetVector_text_start[];
extern char _SecondaryResetVector_text_end[]; extern char _SecondaryResetVector_text_end[];
#endif #endif
......
...@@ -52,12 +52,21 @@ struct thread_info { ...@@ -52,12 +52,21 @@ struct thread_info {
__u32 cpu; /* current CPU */ __u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
unsigned long cpenable;
#if XCHAL_HAVE_EXCLUSIVE #if XCHAL_HAVE_EXCLUSIVE
/* result of the most recent exclusive store */ /* result of the most recent exclusive store */
unsigned long atomctl8; unsigned long atomctl8;
#endif #endif
#ifdef CONFIG_USER_ABI_CALL0_PROBE
/* Address where PS.WOE was enabled by the ABI probing code */
unsigned long ps_woe_fix_addr;
#endif
/*
* If the i-th bit is set then the coprocessor state is loaded into
* coprocessor i on CPU cp_owner_cpu.
*/
unsigned long cpenable;
u32 cp_owner_cpu;
/* Allocate storage for extra user states and coprocessor states. */ /* Allocate storage for extra user states and coprocessor states. */
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t xtregs_cp; xtregs_coprocessor_t xtregs_cp;
......
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
typedef void xtensa_exception_handler(struct pt_regs *regs);
/* /*
* Per-CPU exception handling data structure. * Per-CPU exception handling data structure.
* EXCSAVE1 points to it. * EXCSAVE1 points to it.
...@@ -25,31 +27,47 @@ struct exc_table { ...@@ -25,31 +27,47 @@ struct exc_table {
void *fixup; void *fixup;
/* For passing a parameter to fixup */ /* For passing a parameter to fixup */
void *fixup_param; void *fixup_param;
#if XTENSA_HAVE_COPROCESSORS
/* Pointers to owner struct thread_info */
struct thread_info *coprocessor_owner[XCHAL_CP_MAX];
#endif
/* Fast user exception handlers */ /* Fast user exception handlers */
void *fast_user_handler[EXCCAUSE_N]; void *fast_user_handler[EXCCAUSE_N];
/* Fast kernel exception handlers */ /* Fast kernel exception handlers */
void *fast_kernel_handler[EXCCAUSE_N]; void *fast_kernel_handler[EXCCAUSE_N];
/* Default C-Handlers */ /* Default C-Handlers */
void *default_handler[EXCCAUSE_N]; xtensa_exception_handler *default_handler[EXCCAUSE_N];
}; };
/* DECLARE_PER_CPU(struct exc_table, exc_table);
* handler must be either of the following:
* void (*)(struct pt_regs *regs); xtensa_exception_handler *
* void (*)(struct pt_regs *regs, unsigned long exccause); __init trap_set_handler(int cause, xtensa_exception_handler *handler);
*/
extern void * __init trap_set_handler(int cause, void *handler); asmlinkage void fast_illegal_instruction_user(void);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); asmlinkage void fast_syscall_user(void);
void fast_second_level_miss(void); asmlinkage void fast_alloca(void);
asmlinkage void fast_unaligned(void);
asmlinkage void fast_second_level_miss(void);
asmlinkage void fast_store_prohibited(void);
asmlinkage void fast_coprocessor(void);
asmlinkage void kernel_exception(void);
asmlinkage void user_exception(void);
asmlinkage void system_call(struct pt_regs *regs);
void do_IRQ(int hwirq, struct pt_regs *regs);
void do_page_fault(struct pt_regs *regs);
void do_unhandled(struct pt_regs *regs);
/* Initialize minimal exc_table structure sufficient for basic paging */ /* Initialize minimal exc_table structure sufficient for basic paging */
static inline void __init early_trap_init(void) static inline void __init early_trap_init(void)
{ {
static struct exc_table exc_table __initdata = { static struct exc_table init_exc_table __initdata = {
.fast_kernel_handler[EXCCAUSE_DTLB_MISS] = .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
fast_second_level_miss, fast_second_level_miss,
}; };
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table)); xtensa_set_sr(&init_exc_table, excsave1);
} }
void secondary_trap_init(void); void secondary_trap_init(void);
......
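With the xtensa_exception_handler typedef, handlers registered through trap_set_handler() are now type-checked instead of passed as void pointers. A hedged sketch of the save-and-restore idiom it supports (my_probe_handler is a placeholder; the surrounding logic is illustrative):

        xtensa_exception_handler *old;

        old = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, my_probe_handler);
        /* ... execute the instruction being probed ... */
        trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, old);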
...@@ -19,6 +19,7 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o ...@@ -19,6 +19,7 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_HIBERNATION) += hibernate.o
# In the Xtensa architecture, assembly generates literals which must always # In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB. # precede the L32R instruction with a relative offset less than 256 kB.
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/kbuild.h> #include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/traps.h> #include <asm/traps.h>
...@@ -87,14 +88,19 @@ int main(void) ...@@ -87,14 +88,19 @@ int main(void)
OFFSET(TI_STSTUS, thread_info, status); OFFSET(TI_STSTUS, thread_info, status);
OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
#ifdef CONFIG_USER_ABI_CALL0_PROBE
OFFSET(TI_PS_WOE_FIX_ADDR, thread_info, ps_woe_fix_addr);
#endif
/* struct thread_info (offset from start_struct) */ /* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XCHAL_HAVE_EXCLUSIVE #if XCHAL_HAVE_EXCLUSIVE
DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8)); DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
#endif #endif
DEFINE(THREAD_CPENABLE, offsetof(struct thread_info, cpenable));
DEFINE(THREAD_CPU, offsetof(struct thread_info, cpu));
DEFINE(THREAD_CP_OWNER_CPU, offsetof(struct thread_info, cp_owner_cpu));
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0)); DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1)); DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
...@@ -137,11 +143,22 @@ int main(void) ...@@ -137,11 +143,22 @@ int main(void)
DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save)); DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup)); DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param)); DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
#if XTENSA_HAVE_COPROCESSORS
DEFINE(EXC_TABLE_COPROCESSOR_OWNER,
offsetof(struct exc_table, coprocessor_owner));
#endif
DEFINE(EXC_TABLE_FAST_USER, DEFINE(EXC_TABLE_FAST_USER,
offsetof(struct exc_table, fast_user_handler)); offsetof(struct exc_table, fast_user_handler));
DEFINE(EXC_TABLE_FAST_KERNEL, DEFINE(EXC_TABLE_FAST_KERNEL,
offsetof(struct exc_table, fast_kernel_handler)); offsetof(struct exc_table, fast_kernel_handler));
DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler)); DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
#ifdef CONFIG_HIBERNATION
DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
DEFINE(PBE_NEXT, offsetof(struct pbe, next));
DEFINE(PBE_SIZE, sizeof(struct pbe));
#endif
return 0; return 0;
} }
...@@ -19,6 +19,26 @@ ...@@ -19,6 +19,26 @@
#include <asm/current.h> #include <asm/current.h>
#include <asm/regs.h> #include <asm/regs.h>
/*
* Rules for coprocessor state manipulation on SMP:
*
* - a task may have live coprocessors only on one CPU.
*
* - whether the coprocessor context of task T is live on some CPU is
*   denoted by T's thread_info->cpenable.
*
* - non-zero thread_info->cpenable means that thread_info->cp_owner_cpu
*   is valid in T's thread_info. Zero thread_info->cpenable means that
*   the coprocessor context is valid in T's thread_info.
*
* - if a coprocessor context of task T is live on CPU X, only CPU X changes
*   T's thread_info->cpenable, cp_owner_cpu and coprocessor save area.
*   This is done by making sure that for a task T with live coprocessors
*   on CPU X the CPENABLE SR is 0 whenever T runs on any other CPU Y.
*   When a fast_coprocessor exception is taken on CPU Y it goes to the
*   C-level do_coprocessor, which uses an IPI to make CPU X flush T's
*   coprocessors.
*/
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
/* /*
...@@ -30,34 +50,30 @@ ...@@ -30,34 +50,30 @@
.align 4; \ .align 4; \
.Lsave_cp_regs_cp##x: \ .Lsave_cp_regs_cp##x: \
xchal_cp##x##_store a2 a3 a4 a5 a6; \ xchal_cp##x##_store a2 a3 a4 a5 a6; \
jx a0; \ ret; \
.endif .endif
#define SAVE_CP_REGS_TAB(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \
.long .Lsave_cp_regs_cp##x; \
.else; \
.long 0; \
.endif; \
.long THREAD_XTREGS_CP##x
#define LOAD_CP_REGS(x) \ #define LOAD_CP_REGS(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \ .if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \ .align 4; \
.Lload_cp_regs_cp##x: \ .Lload_cp_regs_cp##x: \
xchal_cp##x##_load a2 a3 a4 a5 a6; \ xchal_cp##x##_load a2 a3 a4 a5 a6; \
jx a0; \ ret; \
.endif .endif
#define LOAD_CP_REGS_TAB(x) \ #define CP_REGS_TAB(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \ .if XTENSA_HAVE_COPROCESSOR(x); \
.long .Lsave_cp_regs_cp##x; \
.long .Lload_cp_regs_cp##x; \ .long .Lload_cp_regs_cp##x; \
.else; \ .else; \
.long 0; \ .long 0, 0; \
.endif; \ .endif; \
.long THREAD_XTREGS_CP##x .long THREAD_XTREGS_CP##x
#define CP_REGS_TAB_SAVE 0
#define CP_REGS_TAB_LOAD 4
#define CP_REGS_TAB_OFFSET 8
__XTENSA_HANDLER __XTENSA_HANDLER
SAVE_CP_REGS(0) SAVE_CP_REGS(0)
...@@ -79,25 +95,15 @@ ...@@ -79,25 +95,15 @@
LOAD_CP_REGS(7) LOAD_CP_REGS(7)
.align 4 .align 4
.Lsave_cp_regs_jump_table: .Lcp_regs_jump_table:
SAVE_CP_REGS_TAB(0) CP_REGS_TAB(0)
SAVE_CP_REGS_TAB(1) CP_REGS_TAB(1)
SAVE_CP_REGS_TAB(2) CP_REGS_TAB(2)
SAVE_CP_REGS_TAB(3) CP_REGS_TAB(3)
SAVE_CP_REGS_TAB(4) CP_REGS_TAB(4)
SAVE_CP_REGS_TAB(5) CP_REGS_TAB(5)
SAVE_CP_REGS_TAB(6) CP_REGS_TAB(6)
SAVE_CP_REGS_TAB(7) CP_REGS_TAB(7)
.Lload_cp_regs_jump_table:
LOAD_CP_REGS_TAB(0)
LOAD_CP_REGS_TAB(1)
LOAD_CP_REGS_TAB(2)
LOAD_CP_REGS_TAB(3)
LOAD_CP_REGS_TAB(4)
LOAD_CP_REGS_TAB(5)
LOAD_CP_REGS_TAB(6)
LOAD_CP_REGS_TAB(7)
/* /*
* Entry condition: * Entry condition:
...@@ -115,9 +121,37 @@ ...@@ -115,9 +121,37 @@
ENTRY(fast_coprocessor) ENTRY(fast_coprocessor)
s32i a3, a2, PT_AREG3
#ifdef CONFIG_SMP
/*
* Check if any coprocessor context is live on another CPU
* and if so go through the C-level coprocessor exception handler
* to flush it to memory.
*/
GET_THREAD_INFO (a0, a2)
l32i a3, a0, THREAD_CPENABLE
beqz a3, .Lload_local
/*
* Pairs with smp_wmb in local_coprocessor_release_all
* and with both memws below.
*/
memw
l32i a3, a0, THREAD_CPU
l32i a0, a0, THREAD_CP_OWNER_CPU
beq a0, a3, .Lload_local
rsr a0, ps
l32i a3, a2, PT_AREG3
bbci.l a0, PS_UM_BIT, 1f
call0 user_exception
1: call0 kernel_exception
#endif
/* Save remaining registers a1-a3 and SAR */ /* Save remaining registers a1-a3 and SAR */
s32i a3, a2, PT_AREG3 .Lload_local:
rsr a3, sar rsr a3, sar
s32i a1, a2, PT_AREG1 s32i a1, a2, PT_AREG1
s32i a3, a2, PT_SAR s32i a3, a2, PT_SAR
...@@ -125,13 +159,15 @@ ENTRY(fast_coprocessor) ...@@ -125,13 +159,15 @@ ENTRY(fast_coprocessor)
rsr a2, depc rsr a2, depc
s32i a2, a1, PT_AREG2 s32i a2, a1, PT_AREG2
/* /* The hal macros require up to 4 temporary registers. We use a3..a6. */
* The hal macros require up to 4 temporary registers. We use a3..a6.
*/
s32i a4, a1, PT_AREG4 s32i a4, a1, PT_AREG4
s32i a5, a1, PT_AREG5 s32i a5, a1, PT_AREG5
s32i a6, a1, PT_AREG6 s32i a6, a1, PT_AREG6
s32i a7, a1, PT_AREG7
s32i a8, a1, PT_AREG8
s32i a9, a1, PT_AREG9
s32i a10, a1, PT_AREG10
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */ /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
...@@ -148,58 +184,74 @@ ENTRY(fast_coprocessor) ...@@ -148,58 +184,74 @@ ENTRY(fast_coprocessor)
wsr a0, cpenable wsr a0, cpenable
rsync rsync
/* Retrieve previous owner. (a3 still holds CP number) */ /* Get coprocessor save/load table entry (a7). */
movi a0, coprocessor_owner # list of owners movi a7, .Lcp_regs_jump_table
addx4 a0, a3, a0 # entry for CP addx8 a7, a3, a7
l32i a4, a0, 0 addx4 a7, a3, a7
beqz a4, 1f # skip 'save' if no previous owner /* Retrieve previous owner (a8). */
/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */ rsr a0, excsave1 # exc_table
addx4 a0, a3, a0 # entry for CP
l32i a8, a0, EXC_TABLE_COPROCESSOR_OWNER
/* Set new owner (a9). */
l32i a5, a4, THREAD_CPENABLE GET_THREAD_INFO (a9, a1)
xor a5, a5, a2 # (1 << cp-id) still in a2 l32i a4, a9, THREAD_CPU
s32i a5, a4, THREAD_CPENABLE s32i a9, a0, EXC_TABLE_COPROCESSOR_OWNER
s32i a4, a9, THREAD_CP_OWNER_CPU
/* /*
* Get context save area and 'call' save routine. * Enable coprocessor for the new owner. (a2 = 1 << CP number)
* (a4 still holds previous owner (thread_info), a3 CP number) * This can be done before loading context into the coprocessor.
*/ */
l32i a4, a9, THREAD_CPENABLE
or a4, a4, a2
movi a5, .Lsave_cp_regs_jump_table /*
movi a0, 2f # a0: 'return' address * Make sure THREAD_CP_OWNER_CPU is in memory before updating
addx8 a3, a3, a5 # a3: coprocessor number * THREAD_CPENABLE
l32i a2, a3, 4 # a2: xtregs offset */
l32i a3, a3, 0 # a3: jump address memw # (2)
add a2, a2, a4 s32i a4, a9, THREAD_CPENABLE
jx a3
/* Note that only a0 and a1 were preserved. */ beqz a8, 1f # skip 'save' if no previous owner
2: rsr a3, exccause /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
movi a0, coprocessor_owner
addx4 a0, a3, a0
/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */ l32i a10, a8, THREAD_CPENABLE
xor a10, a10, a2
1: GET_THREAD_INFO (a4, a1) /* Get context save area and call save routine. */
s32i a4, a0, 0
/* Get context save area and 'call' load routine. */ l32i a2, a7, CP_REGS_TAB_OFFSET
l32i a3, a7, CP_REGS_TAB_SAVE
add a2, a2, a8
callx0 a3
movi a5, .Lload_cp_regs_jump_table /*
movi a0, 1f * Make sure coprocessor context and THREAD_CP_OWNER_CPU are in memory
addx8 a3, a3, a5 * before updating THREAD_CPENABLE
l32i a2, a3, 4 # a2: xtregs offset */
l32i a3, a3, 0 # a3: jump address memw # (3)
add a2, a2, a4 s32i a10, a8, THREAD_CPENABLE
jx a3 1:
/* Get context save area and call load routine. */
l32i a2, a7, CP_REGS_TAB_OFFSET
l32i a3, a7, CP_REGS_TAB_LOAD
add a2, a2, a9
callx0 a3
/* Restore all registers and return from exception handler. */ /* Restore all registers and return from exception handler. */
1: l32i a6, a1, PT_AREG6 l32i a10, a1, PT_AREG10
l32i a9, a1, PT_AREG9
l32i a8, a1, PT_AREG8
l32i a7, a1, PT_AREG7
l32i a6, a1, PT_AREG6
l32i a5, a1, PT_AREG5 l32i a5, a1, PT_AREG5
l32i a4, a1, PT_AREG4 l32i a4, a1, PT_AREG4
...@@ -230,29 +282,21 @@ ENDPROC(fast_coprocessor) ...@@ -230,29 +282,21 @@ ENDPROC(fast_coprocessor)
ENTRY(coprocessor_flush) ENTRY(coprocessor_flush)
/* reserve 4 bytes on stack to save a0 */ abi_entry_default
abi_entry(4)
movi a4, .Lcp_regs_jump_table
s32i a0, a1, 0 addx8 a4, a3, a4
movi a0, .Lsave_cp_regs_jump_table addx4 a3, a3, a4
addx8 a3, a3, a0 l32i a4, a3, CP_REGS_TAB_SAVE
l32i a4, a3, 4 beqz a4, 1f
l32i a3, a3, 0 l32i a3, a3, CP_REGS_TAB_OFFSET
add a2, a2, a4 add a2, a2, a3
beqz a3, 1f mov a7, a0
callx0 a3 callx0 a4
1: l32i a0, a1, 0 mov a0, a7
1:
abi_ret(4) abi_ret_default
ENDPROC(coprocessor_flush) ENDPROC(coprocessor_flush)
.data
ENTRY(coprocessor_owner)
.fill XCHAL_CP_MAX, 4, 0
END(coprocessor_owner)
#endif /* XTENSA_HAVE_COPROCESSORS */ #endif /* XTENSA_HAVE_COPROCESSORS */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/suspend.h>
#include <asm/coprocessor.h>
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
void notrace save_processor_state(void)
{
WARN_ON(num_online_cpus() != 1);
#if XTENSA_HAVE_COPROCESSORS
local_coprocessors_flush_release_all();
#endif
}
void notrace restore_processor_state(void)
{
}
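For context, the generic hibernation core invokes these hooks around the arch snapshot entry point, roughly as follows (simplified from kernel/power/hibernate.c; error handling omitted):

        local_irq_disable();
        save_processor_state();         /* flushes and releases all coprocessors */
        error = swsusp_arch_suspend();  /* take or restore the image */
        restore_processor_state();
        local_irq_enable();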
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/regs.h> #include <asm/regs.h>
#include <asm/hw_breakpoint.h> #include <asm/hw_breakpoint.h>
#include <asm/traps.h>
extern void ret_from_fork(void); extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void); extern void ret_from_kernel_thread(void);
...@@ -63,52 +64,114 @@ EXPORT_SYMBOL(__stack_chk_guard); ...@@ -63,52 +64,114 @@ EXPORT_SYMBOL(__stack_chk_guard);
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
void coprocessor_release_all(struct thread_info *ti) void local_coprocessors_flush_release_all(void)
{ {
unsigned long cpenable; struct thread_info **coprocessor_owner;
int i; struct thread_info *unique_owner[XCHAL_CP_MAX];
int n = 0;
int i, j;
/* Make sure we don't switch tasks during this operation. */ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
xtensa_set_sr(XCHAL_CP_MASK, cpenable);
preempt_disable(); for (i = 0; i < XCHAL_CP_MAX; i++) {
struct thread_info *ti = coprocessor_owner[i];
/* Walk through all cp owners and release it for the requested one. */ if (ti) {
coprocessor_flush(ti, i);
cpenable = ti->cpenable; for (j = 0; j < n; j++)
if (unique_owner[j] == ti)
break;
if (j == n)
unique_owner[n++] = ti;
for (i = 0; i < XCHAL_CP_MAX; i++) { coprocessor_owner[i] = NULL;
if (coprocessor_owner[i] == ti) {
coprocessor_owner[i] = 0;
cpenable &= ~(1 << i);
} }
} }
for (i = 0; i < n; i++) {
/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
smp_wmb();
unique_owner[i]->cpenable = 0;
}
xtensa_set_sr(0, cpenable);
}
ti->cpenable = cpenable; static void local_coprocessor_release_all(void *info)
{
struct thread_info *ti = info;
struct thread_info **coprocessor_owner;
int i;
coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
/* Walk through all cp owners and release it for the requested one. */
for (i = 0; i < XCHAL_CP_MAX; i++) {
if (coprocessor_owner[i] == ti)
coprocessor_owner[i] = NULL;
}
/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
smp_wmb();
ti->cpenable = 0;
if (ti == current_thread_info()) if (ti == current_thread_info())
xtensa_set_sr(0, cpenable); xtensa_set_sr(0, cpenable);
}
preempt_enable(); void coprocessor_release_all(struct thread_info *ti)
{
if (ti->cpenable) {
/* pairs with memw (2) in fast_coprocessor */
smp_rmb();
smp_call_function_single(ti->cp_owner_cpu,
local_coprocessor_release_all,
ti, true);
}
} }
void coprocessor_flush_all(struct thread_info *ti) static void local_coprocessor_flush_all(void *info)
{ {
unsigned long cpenable, old_cpenable; struct thread_info *ti = info;
struct thread_info **coprocessor_owner;
unsigned long old_cpenable;
int i; int i;
preempt_disable(); coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
old_cpenable = xtensa_get_sr(cpenable);
cpenable = ti->cpenable;
xtensa_set_sr(cpenable, cpenable);
for (i = 0; i < XCHAL_CP_MAX; i++) { for (i = 0; i < XCHAL_CP_MAX; i++) {
if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) if (coprocessor_owner[i] == ti)
coprocessor_flush(ti, i); coprocessor_flush(ti, i);
cpenable >>= 1;
} }
xtensa_set_sr(old_cpenable, cpenable); xtensa_set_sr(old_cpenable, cpenable);
}
preempt_enable(); void coprocessor_flush_all(struct thread_info *ti)
{
if (ti->cpenable) {
/* pairs with memw (2) in fast_coprocessor */
smp_rmb();
smp_call_function_single(ti->cp_owner_cpu,
local_coprocessor_flush_all,
ti, true);
}
}
static void local_coprocessor_flush_release_all(void *info)
{
local_coprocessor_flush_all(info);
local_coprocessor_release_all(info);
}
void coprocessor_flush_release_all(struct thread_info *ti)
{
if (ti->cpenable) {
/* pairs with memw (2) in fast_coprocessor */
smp_rmb();
smp_call_function_single(ti->cp_owner_cpu,
local_coprocessor_flush_release_all,
ti, true);
}
} }
#endif #endif
...@@ -140,8 +203,7 @@ void flush_thread(void) ...@@ -140,8 +203,7 @@ void flush_thread(void)
{ {
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
struct thread_info *ti = current_thread_info(); struct thread_info *ti = current_thread_info();
coprocessor_flush_all(ti); coprocessor_flush_release_all(ti);
coprocessor_release_all(ti);
#endif #endif
flush_ptrace_hw_breakpoint(current); flush_ptrace_hw_breakpoint(current);
} }
......
...@@ -171,8 +171,7 @@ static int tie_set(struct task_struct *target, ...@@ -171,8 +171,7 @@ static int tie_set(struct task_struct *target,
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
/* Flush all coprocessors before we overwrite them. */ /* Flush all coprocessors before we overwrite them. */
coprocessor_flush_all(ti); coprocessor_flush_release_all(ti);
coprocessor_release_all(ti);
ti->xtregs_cp.cp0 = newregs->cp0; ti->xtregs_cp.cp0 = newregs->cp0;
ti->xtregs_cp.cp1 = newregs->cp1; ti->xtregs_cp.cp1 = newregs->cp1;
ti->xtregs_cp.cp2 = newregs->cp2; ti->xtregs_cp.cp2 = newregs->cp2;
......
...@@ -40,14 +40,13 @@ static inline int probed_compare_swap(int *v, int cmp, int set) ...@@ -40,14 +40,13 @@ static inline int probed_compare_swap(int *v, int cmp, int set)
/* Handle probed exception */ /* Handle probed exception */
static void __init do_probed_exception(struct pt_regs *regs, static void __init do_probed_exception(struct pt_regs *regs)
unsigned long exccause)
{ {
if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
regs->pc += 3; /* skip the s32c1i instruction */ regs->pc += 3; /* skip the s32c1i instruction */
rcw_exc = exccause; rcw_exc = regs->exccause;
} else { } else {
do_unhandled(regs, exccause); do_unhandled(regs);
} }
} }
......
...@@ -162,8 +162,7 @@ setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs) ...@@ -162,8 +162,7 @@ setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
return err; return err;
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
coprocessor_flush_all(ti); coprocessor_flush_release_all(ti);
coprocessor_release_all(ti);
err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp, err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
sizeof (frame->xtregs.cp)); sizeof (frame->xtregs.cp));
#endif #endif
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include <linux/thread_info.h> #include <linux/thread_info.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/coprocessor.h>
#include <asm/kdebug.h> #include <asm/kdebug.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/mxregs.h> #include <asm/mxregs.h>
...@@ -272,6 +273,12 @@ int __cpu_disable(void) ...@@ -272,6 +273,12 @@ int __cpu_disable(void)
*/ */
set_cpu_online(cpu, false); set_cpu_online(cpu, false);
#if XTENSA_HAVE_COPROCESSORS
/*
* Flush coprocessor contexts that are active on the current CPU.
*/
local_coprocessors_flush_release_all();
#endif
/* /*
* OK - migrate IRQs away from this CPU * OK - migrate IRQs away from this CPU
*/ */
......
...@@ -48,25 +48,20 @@ ...@@ -48,25 +48,20 @@
* Machine specific interrupt handlers * Machine specific interrupt handlers
*/ */
extern void kernel_exception(void); static void do_illegal_instruction(struct pt_regs *regs);
extern void user_exception(void); static void do_div0(struct pt_regs *regs);
static void do_interrupt(struct pt_regs *regs);
extern void fast_illegal_instruction_user(void); #if XTENSA_FAKE_NMI
extern void fast_syscall_user(void); static void do_nmi(struct pt_regs *regs);
extern void fast_alloca(void); #endif
extern void fast_unaligned(void); #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
extern void fast_second_level_miss(void); static void do_unaligned_user(struct pt_regs *regs);
extern void fast_store_prohibited(void); #endif
extern void fast_coprocessor(void); static void do_multihit(struct pt_regs *regs);
#if XTENSA_HAVE_COPROCESSORS
extern void do_illegal_instruction (struct pt_regs*); static void do_coprocessor(struct pt_regs *regs);
extern void do_interrupt (struct pt_regs*); #endif
extern void do_nmi(struct pt_regs *); static void do_debug(struct pt_regs *regs);
extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long);
extern void do_debug (struct pt_regs*);
extern void system_call (struct pt_regs*);
/* /*
* The vector table must be preceded by a save area (which * The vector table must be preceded by a save area (which
...@@ -78,7 +73,8 @@ extern void system_call (struct pt_regs*); ...@@ -78,7 +73,8 @@ extern void system_call (struct pt_regs*);
#define USER 0x02 #define USER 0x02
#define COPROCESSOR(x) \ #define COPROCESSOR(x) \
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor } { EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }
typedef struct { typedef struct {
int cause; int cause;
...@@ -100,7 +96,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { ...@@ -100,7 +96,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
#ifdef SUPPORT_WINDOWED #ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca }, { EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
#endif #endif
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */ { EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0, do_div0 },
/* EXCCAUSE_PRIVILEGED unhandled */ /* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER #ifdef CONFIG_XTENSA_UNALIGNED_USER
...@@ -110,21 +106,21 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { ...@@ -110,21 +106,21 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, { EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif #endif
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS, 0, do_page_fault }, { EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss}, { EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
#endif /* CONFIG_MMU */
#ifdef CONFIG_PFAULT
{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit }, { EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault }, { EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit }, { EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault }, { EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
#endif /* CONFIG_MMU */ #endif
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ /* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0) #if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0), COPROCESSOR(0),
...@@ -179,7 +175,7 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err) ...@@ -179,7 +175,7 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
* Unhandled Exceptions. Kill user task or panic if in kernel space. * Unhandled Exceptions. Kill user task or panic if in kernel space.
*/ */
void do_unhandled(struct pt_regs *regs, unsigned long exccause) void do_unhandled(struct pt_regs *regs)
{ {
__die_if_kernel("Caught unhandled exception - should not happen", __die_if_kernel("Caught unhandled exception - should not happen",
regs, SIGKILL); regs, SIGKILL);
...@@ -189,7 +185,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause) ...@@ -189,7 +185,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
"(pid = %d, pc = %#010lx) - should not happen\n" "(pid = %d, pc = %#010lx) - should not happen\n"
"\tEXCCAUSE is %ld\n", "\tEXCCAUSE is %ld\n",
current->comm, task_pid_nr(current), regs->pc, current->comm, task_pid_nr(current), regs->pc,
exccause); regs->exccause);
force_sig(SIGILL); force_sig(SIGILL);
} }
...@@ -197,7 +193,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause) ...@@ -197,7 +193,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
* Multi-hit exception. This is fatal! * Multi-hit exception. This is fatal!
*/ */
void do_multihit(struct pt_regs *regs, unsigned long exccause) static void do_multihit(struct pt_regs *regs)
{ {
die("Caught multihit exception", regs, SIGKILL); die("Caught multihit exception", regs, SIGKILL);
} }
...@@ -206,8 +202,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) ...@@ -206,8 +202,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
* IRQ handler. * IRQ handler.
*/ */
extern void do_IRQ(int, struct pt_regs *);
#if XTENSA_FAKE_NMI #if XTENSA_FAKE_NMI
#define IS_POW2(v) (((v) & ((v) - 1)) == 0) #define IS_POW2(v) (((v) & ((v) - 1)) == 0)
...@@ -240,14 +234,10 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id); ...@@ -240,14 +234,10 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
DEFINE_PER_CPU(unsigned long, nmi_count); DEFINE_PER_CPU(unsigned long, nmi_count);
void do_nmi(struct pt_regs *regs) static void do_nmi(struct pt_regs *regs)
{ {
struct pt_regs *old_regs; struct pt_regs *old_regs = set_irq_regs(regs);
if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
trace_hardirqs_off();
old_regs = set_irq_regs(regs);
nmi_enter(); nmi_enter();
++*this_cpu_ptr(&nmi_count); ++*this_cpu_ptr(&nmi_count);
check_valid_nmi(); check_valid_nmi();
...@@ -257,7 +247,7 @@ void do_nmi(struct pt_regs *regs) ...@@ -257,7 +247,7 @@ void do_nmi(struct pt_regs *regs)
} }
#endif #endif
void do_interrupt(struct pt_regs *regs) static void do_interrupt(struct pt_regs *regs)
{ {
static const unsigned int_level_mask[] = { static const unsigned int_level_mask[] = {
0, 0,
...@@ -269,12 +259,9 @@ void do_interrupt(struct pt_regs *regs) ...@@ -269,12 +259,9 @@ void do_interrupt(struct pt_regs *regs)
XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK, XCHAL_INTLEVEL7_MASK,
}; };
struct pt_regs *old_regs; struct pt_regs *old_regs = set_irq_regs(regs);
unsigned unhandled = ~0u; unsigned unhandled = ~0u;
trace_hardirqs_off();
old_regs = set_irq_regs(regs);
irq_enter(); irq_enter();
for (;;) { for (;;) {
...@@ -306,13 +293,47 @@ void do_interrupt(struct pt_regs *regs) ...@@ -306,13 +293,47 @@ void do_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }
static bool check_div0(struct pt_regs *regs)
{
static const u8 pattern[] = {'D', 'I', 'V', '0'};
const u8 *p;
u8 buf[5];
if (user_mode(regs)) {
if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
return false;
p = buf;
} else {
p = (const u8 *)regs->pc + 2;
}
return memcmp(p, pattern, sizeof(pattern)) == 0 ||
memcmp(p + 1, pattern, sizeof(pattern)) == 0;
}
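check_div0() recognizes an artificial division-by-zero trap by the ASCII tag 'DIV0' placed immediately after the faulting opcode; matching at both p and p + 1 covers 2-byte and 3-byte illegal-instruction encodings. A hypothetical sketch of the emitting side (the real sequence lives in the xtensa division helpers and may differ):

        /* hypothetical: raise an illegal-instruction trap tagged for check_div0() */
        asm volatile("ill\n\t"
                     ".ascii \"DIV0\"");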
/* /*
* Illegal instruction. Fatal if in kernel space. * Illegal instruction. Fatal if in kernel space.
*/ */
void static void do_illegal_instruction(struct pt_regs *regs)
do_illegal_instruction(struct pt_regs *regs)
{ {
#ifdef CONFIG_USER_ABI_CALL0_PROBE
/*
* When a call0 application encounters an illegal instruction, the fast
* exception handler will attempt to set PS.WOE and retry the failing
* instruction.
* If we get here we know that the instruction is also illegal with
* PS.WOE set, so it is not related to the windowed option, hence
* PS.WOE may be cleared.
*/
if (regs->pc == current_thread_info()->ps_woe_fix_addr)
regs->ps &= ~PS_WOE_MASK;
#endif
if (check_div0(regs)) {
do_div0(regs);
return;
}
__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL); __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
/* If in user mode, send SIGILL signal to current process. */ /* If in user mode, send SIGILL signal to current process. */
...@@ -322,6 +343,11 @@ do_illegal_instruction(struct pt_regs *regs) ...@@ -322,6 +343,11 @@ do_illegal_instruction(struct pt_regs *regs)
force_sig(SIGILL); force_sig(SIGILL);
} }
static void do_div0(struct pt_regs *regs)
{
__die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
}
/* /*
* Handle unaligned memory accesses from user space. Kill task. * Handle unaligned memory accesses from user space. Kill task.
...@@ -331,8 +357,7 @@ do_illegal_instruction(struct pt_regs *regs) ...@@ -331,8 +357,7 @@ do_illegal_instruction(struct pt_regs *regs)
*/ */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
void static void do_unaligned_user(struct pt_regs *regs)
do_unaligned_user (struct pt_regs *regs)
{ {
__die_if_kernel("Unhandled unaligned exception in kernel", __die_if_kernel("Unhandled unaligned exception in kernel",
regs, SIGKILL); regs, SIGKILL);
...@@ -347,14 +372,20 @@ do_unaligned_user (struct pt_regs *regs) ...@@ -347,14 +372,20 @@ do_unaligned_user (struct pt_regs *regs)
} }
#endif #endif
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs)
{
coprocessor_flush_release_all(current_thread_info());
}
#endif
/* Handle debug events. /* Handle debug events.
* When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
* preemption disabled to avoid rescheduling and keep mapping of hardware * preemption disabled to avoid rescheduling and keep mapping of hardware
* breakpoint structures to debug registers intact, so that * breakpoint structures to debug registers intact, so that
* DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit. * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
*/ */
void static void do_debug(struct pt_regs *regs)
do_debug(struct pt_regs *regs)
{ {
#ifdef CONFIG_HAVE_HW_BREAKPOINT #ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret = check_hw_breakpoint(regs); int ret = check_hw_breakpoint(regs);
...@@ -381,7 +412,8 @@ do_debug(struct pt_regs *regs) ...@@ -381,7 +412,8 @@ do_debug(struct pt_regs *regs)
/* Set exception C handler - for temporary use when probing exceptions */ /* Set exception C handler - for temporary use when probing exceptions */
void * __init trap_set_handler(int cause, void *handler) xtensa_exception_handler *
__init trap_set_handler(int cause, xtensa_exception_handler *handler)
{ {
void *previous = per_cpu(exc_table, 0).default_handler[cause]; void *previous = per_cpu(exc_table, 0).default_handler[cause];
...@@ -392,8 +424,7 @@ void * __init trap_set_handler(int cause, void *handler) ...@@ -392,8 +424,7 @@ void * __init trap_set_handler(int cause, void *handler)
static void trap_init_excsave(void) static void trap_init_excsave(void)
{ {
unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table); xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
} }
static void trap_init_debug(void) static void trap_init_debug(void)
......
...@@ -8,3 +8,5 @@ lib-y += memcopy.o memset.o checksum.o \ ...@@ -8,3 +8,5 @@ lib-y += memcopy.o memset.o checksum.o \
divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \ divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
usercopy.o strncpy_user.o strnlen_user.o usercopy.o strncpy_user.o strnlen_user.o
lib-$(CONFIG_PCI) += pci-auto.o lib-$(CONFIG_PCI) += pci-auto.o
lib-$(CONFIG_KCSAN) += kcsan-stubs.o
KCSAN_SANITIZE_kcsan-stubs.o := n
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/types.h>
void __atomic_store_8(volatile void *p, u64 v, int i)
{
BUG();
}
u64 __atomic_load_8(const volatile void *p, int i)
{
BUG();
}
u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
{
BUG();
}
bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
{
BUG();
}
u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
{
BUG();
}
u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
{
BUG();
}
u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
{
BUG();
}
u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
{
BUG();
}
u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
{
BUG();
}
u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
{
BUG();
}
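These stubs exist because KCSAN instrumentation can make the compiler emit out-of-line calls to the 64-bit __atomic builtins, which 32-bit xtensa has no native way to implement; the stubs satisfy the linker and trap via BUG() if ever reached. A minimal illustration of code that would generate such a libcall:

        u64 load64(const volatile u64 *p)
        {
                /* no native 8-byte atomics: the compiler calls __atomic_load_8() */
                return __atomic_load_n(p, __ATOMIC_RELAXED);
        }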
...@@ -402,13 +402,13 @@ WEAK(memmove) ...@@ -402,13 +402,13 @@ WEAK(memmove)
*/ */
# copy 16 bytes per iteration for word-aligned dst and word-aligned src # copy 16 bytes per iteration for word-aligned dst and word-aligned src
#if XCHAL_HAVE_LOOPS #if XCHAL_HAVE_LOOPS
loopnez a7, .backLoop1done loopnez a7, .LbackLoop1done
#else /* !XCHAL_HAVE_LOOPS */ #else /* !XCHAL_HAVE_LOOPS */
beqz a7, .backLoop1done beqz a7, .LbackLoop1done
slli a8, a7, 4 slli a8, a7, 4
sub a8, a3, a8 # a8 = start of first 16B source chunk sub a8, a3, a8 # a8 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.backLoop1: .LbackLoop1:
addi a3, a3, -16 addi a3, a3, -16
l32i a7, a3, 12 l32i a7, a3, 12
l32i a6, a3, 8 l32i a6, a3, 8
...@@ -420,9 +420,9 @@ WEAK(memmove) ...@@ -420,9 +420,9 @@ WEAK(memmove)
s32i a7, a5, 4 s32i a7, a5, 4
s32i a6, a5, 0 s32i a6, a5, 0
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start bne a3, a8, .LbackLoop1 # continue loop if a3:src != a8:src_start
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.backLoop1done: .LbackLoop1done:
bbci.l a4, 3, .Lback2 bbci.l a4, 3, .Lback2
# copy 8 bytes # copy 8 bytes
addi a3, a3, -8 addi a3, a3, -8
...@@ -479,13 +479,13 @@ WEAK(memmove) ...@@ -479,13 +479,13 @@ WEAK(memmove)
#endif #endif
l32i a6, a3, 0 # load first word l32i a6, a3, 0 # load first word
#if XCHAL_HAVE_LOOPS #if XCHAL_HAVE_LOOPS
loopnez a7, .backLoop2done loopnez a7, .LbackLoop2done
#else /* !XCHAL_HAVE_LOOPS */ #else /* !XCHAL_HAVE_LOOPS */
beqz a7, .backLoop2done beqz a7, .LbackLoop2done
slli a10, a7, 4 slli a10, a7, 4
sub a10, a3, a10 # a10 = start of first 16B source chunk sub a10, a3, a10 # a10 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.backLoop2: .LbackLoop2:
addi a3, a3, -16 addi a3, a3, -16
l32i a7, a3, 12 l32i a7, a3, 12
l32i a8, a3, 8 l32i a8, a3, 8
...@@ -501,9 +501,9 @@ WEAK(memmove) ...@@ -501,9 +501,9 @@ WEAK(memmove)
__src_b a9, a6, a9 __src_b a9, a6, a9
s32i a9, a5, 0 s32i a9, a5, 0
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start bne a3, a10, .LbackLoop2 # continue loop if a3:src != a10:src_start
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.backLoop2done: .LbackLoop2done:
bbci.l a4, 3, .Lback12 bbci.l a4, 3, .Lback12
# copy 8 bytes # copy 8 bytes
addi a3, a3, -8 addi a3, a3, -8
......
...@@ -4,7 +4,8 @@ ...@@ -4,7 +4,8 @@
# #
obj-y := init.o misc.o obj-y := init.o misc.o
obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o obj-$(CONFIG_PFAULT) += fault.o
obj-$(CONFIG_MMU) += cache.o ioremap.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_KASAN) += kasan_init.o obj-$(CONFIG_KASAN) += kasan_init.o
......
...@@ -21,9 +21,61 @@ ...@@ -21,9 +21,61 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/hardirq.h> #include <asm/hardirq.h>
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int); void bad_page_fault(struct pt_regs*, unsigned long, int);
static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
/* Synchronize this task's top level page-table
* with the 'reference' page table.
*/
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
if (act_mm == NULL)
goto bad_page_fault;
pgd = act_mm->pgd + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
goto bad_page_fault;
pgd_val(*pgd) = pgd_val(*pgd_k);
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
goto bad_page_fault;
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud) || !pud_present(*pud_k))
goto bad_page_fault;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
pmd_val(*pmd) = pmd_val(*pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto bad_page_fault;
return;
bad_page_fault:
bad_page_fault(regs, address, SIGKILL);
#else
WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}
/* /*
* This routine handles page faults. It determines the address, * This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate * and the problem, and then passes it off to one of the appropriate
...@@ -49,8 +101,10 @@ void do_page_fault(struct pt_regs *regs) ...@@ -49,8 +101,10 @@ void do_page_fault(struct pt_regs *regs)
/* We fault-in kernel-space virtual memory on-demand. The /* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd. * 'reference' page table is init_mm.pgd.
*/ */
if (address >= TASK_SIZE && !user_mode(regs)) if (address >= TASK_SIZE && !user_mode(regs)) {
goto vmalloc_fault; vmalloc_fault(regs, address);
return;
}
/* If we're in an interrupt or have no user /* If we're in an interrupt or have no user
* context, we must not take the fault.. * context, we must not take the fault..
...@@ -114,7 +168,7 @@ void do_page_fault(struct pt_regs *regs) ...@@ -114,7 +168,7 @@ void do_page_fault(struct pt_regs *regs)
if (fault_signal_pending(fault, regs)) { if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs)) if (!user_mode(regs))
goto bad_page_fault; bad_page_fault(regs, address, SIGKILL);
return; return;
} }
...@@ -181,56 +235,6 @@ void do_page_fault(struct pt_regs *regs) ...@@ -181,56 +235,6 @@ void do_page_fault(struct pt_regs *regs)
if (!user_mode(regs)) if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS); bad_page_fault(regs, address, SIGBUS);
return; return;
vmalloc_fault:
{
/* Synchronize this task's top level page-table
* with the 'reference' page table.
*/
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
if (act_mm == NULL)
goto bad_page_fault;
pgd = act_mm->pgd + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
goto bad_page_fault;
pgd_val(*pgd) = pgd_val(*pgd_k);
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
goto bad_page_fault;
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud) || !pud_present(*pud_k))
goto bad_page_fault;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
pmd_val(*pmd) = pmd_val(*pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto bad_page_fault;
return;
}
bad_page_fault:
bad_page_fault(regs, address, SIGKILL);
return;
} }
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <asm/initialize_mmu.h> #include <asm/initialize_mmu.h>
#include <asm/io.h> #include <asm/io.h>
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
#if defined(CONFIG_HIGHMEM) #if defined(CONFIG_HIGHMEM)
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{ {
......
...@@ -38,9 +38,6 @@ ...@@ -38,9 +38,6 @@
#define ISS_NET_TIMER_VALUE (HZ / 10) #define ISS_NET_TIMER_VALUE (HZ / 10)
static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);
static DEFINE_SPINLOCK(devices_lock); static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices); static LIST_HEAD(devices);
...@@ -59,17 +56,27 @@ struct tuntap_info { ...@@ -59,17 +56,27 @@ struct tuntap_info {
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */
struct iss_net_private;
struct iss_net_ops {
int (*open)(struct iss_net_private *lp);
void (*close)(struct iss_net_private *lp);
int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
unsigned short (*protocol)(struct sk_buff *skb);
int (*poll)(struct iss_net_private *lp);
};
/* This structure contains our private information for the driver. */ /* This structure contains our private information for the driver. */
struct iss_net_private { struct iss_net_private {
struct list_head device_list; struct list_head device_list;
struct list_head opened_list;
spinlock_t lock; spinlock_t lock;
struct net_device *dev; struct net_device *dev;
struct platform_device pdev; struct platform_device pdev;
struct timer_list tl; struct timer_list tl;
struct net_device_stats stats; struct rtnl_link_stats64 stats;
struct timer_list timer; struct timer_list timer;
unsigned int timer_val; unsigned int timer_val;
...@@ -82,12 +89,7 @@ struct iss_net_private { ...@@ -82,12 +89,7 @@ struct iss_net_private {
struct tuntap_info tuntap; struct tuntap_info tuntap;
} info; } info;
int (*open)(struct iss_net_private *lp); const struct iss_net_ops *net_ops;
void (*close)(struct iss_net_private *lp);
int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
unsigned short (*protocol)(struct sk_buff *skb);
int (*poll)(struct iss_net_private *lp);
} tp; } tp;
}; };
...@@ -215,6 +217,15 @@ static int tuntap_poll(struct iss_net_private *lp) ...@@ -215,6 +217,15 @@ static int tuntap_poll(struct iss_net_private *lp)
return simc_poll(lp->tp.info.tuntap.fd); return simc_poll(lp->tp.info.tuntap.fd);
} }
static const struct iss_net_ops tuntap_ops = {
.open = tuntap_open,
.close = tuntap_close,
.read = tuntap_read,
.write = tuntap_write,
.protocol = tuntap_protocol,
.poll = tuntap_poll,
};
/* /*
* ethX=tuntap,[mac address],device name * ethX=tuntap,[mac address],device name
*/ */
...@@ -257,13 +268,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init) ...@@ -257,13 +268,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
lp->mtu = TRANSPORT_TUNTAP_MTU; lp->mtu = TRANSPORT_TUNTAP_MTU;
lp->tp.info.tuntap.fd = -1; lp->tp.info.tuntap.fd = -1;
lp->tp.net_ops = &tuntap_ops;
lp->tp.open = tuntap_open;
lp->tp.close = tuntap_close;
lp->tp.read = tuntap_read;
lp->tp.write = tuntap_write;
lp->tp.protocol = tuntap_protocol;
lp->tp.poll = tuntap_poll;
return 1; return 1;
} }
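This refactor is the standard kernel vtable idiom: six writable per-instance function pointers collapse into a single pointer to a shared const ops table. A generic sketch of the shape (the names here are illustrative, not this driver's):

	struct xport;				/* hypothetical transport */

	struct xport_ops {
		int  (*open)(struct xport *x);
		void (*close)(struct xport *x);
	};

	struct xport {
		const struct xport_ops *ops;	/* one pointer replaces N */
	};

	static int  null_open(struct xport *x)  { return 0; }
	static void null_close(struct xport *x) { }

	static const struct xport_ops null_ops = {
		.open  = null_open,
		.close = null_close,
	};
	/* probe: x->ops = &null_ops;  call sites: x->ops->open(x); */

Besides shrinking every instance, a const table can be placed in read-only data, so the function pointers cannot be corrupted at run time.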
@@ -278,14 +283,16 @@ static int iss_net_rx(struct net_device *dev)
 
 	/* Check if there is any new data. */
 
-	if (lp->tp.poll(lp) == 0)
+	if (lp->tp.net_ops->poll(lp) == 0)
 		return 0;
 
 	/* Try to allocate memory, if it fails, try again next round. */
 
 	skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER);
 	if (skb == NULL) {
+		spin_lock_bh(&lp->lock);
 		lp->stats.rx_dropped++;
+		spin_unlock_bh(&lp->lock);
 		return 0;
 	}
 
@@ -295,15 +302,17 @@ static int iss_net_rx(struct net_device *dev)
 	skb->dev = dev;
 	skb_reset_mac_header(skb);
 
-	pkt_len = lp->tp.read(lp, &skb);
+	pkt_len = lp->tp.net_ops->read(lp, &skb);
 	skb_put(skb, pkt_len);
 
 	if (pkt_len > 0) {
 		skb_trim(skb, pkt_len);
-		skb->protocol = lp->tp.protocol(skb);
+		skb->protocol = lp->tp.net_ops->protocol(skb);
 
+		spin_lock_bh(&lp->lock);
 		lp->stats.rx_bytes += skb->len;
 		lp->stats.rx_packets++;
+		spin_unlock_bh(&lp->lock);
 
 		netif_rx(skb);
 		return pkt_len;
 	}
 
@@ -311,38 +320,24 @@ static int iss_net_rx(struct net_device *dev)
 	return pkt_len;
 }
 
-static int iss_net_poll(void)
+static int iss_net_poll(struct iss_net_private *lp)
 {
-	struct list_head *ele;
 	int err, ret = 0;
 
-	spin_lock(&opened_lock);
-
-	list_for_each(ele, &opened) {
-		struct iss_net_private *lp;
-
-		lp = list_entry(ele, struct iss_net_private, opened_list);
-
-		if (!netif_running(lp->dev))
-			break;
-
-		spin_lock(&lp->lock);
-
-		while ((err = iss_net_rx(lp->dev)) > 0)
-			ret++;
-
-		spin_unlock(&lp->lock);
-
-		if (err < 0) {
-			pr_err("Device '%s' read returned %d, shutting it down\n",
-			       lp->dev->name, err);
-			dev_close(lp->dev);
-		} else {
-			/* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
-		}
+	if (!netif_running(lp->dev))
+		return 0;
+
+	while ((err = iss_net_rx(lp->dev)) > 0)
+		ret++;
+
+	if (err < 0) {
+		pr_err("Device '%s' read returned %d, shutting it down\n",
+		       lp->dev->name, err);
+		dev_close(lp->dev);
+	} else {
+		/* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
 	}
 
-	spin_unlock(&opened_lock);
 	return ret;
 }
 
@@ -351,10 +346,8 @@ static void iss_net_timer(struct timer_list *t)
 {
 	struct iss_net_private *lp = from_timer(lp, t, timer);
 
-	iss_net_poll();
-	spin_lock(&lp->lock);
+	iss_net_poll(lp);
 	mod_timer(&lp->timer, jiffies + lp->timer_val);
-	spin_unlock(&lp->lock);
 }
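With the global opened list gone, each device is polled from its own timer, and from_timer() (a container_of() wrapper) recovers the private struct from the timer_list pointer the timer core hands to the callback. The idiom, annotated:

	static void poll_timer_fn(struct timer_list *t)
	{
		/* 'timer' names the timer_list field inside the private struct */
		struct iss_net_private *lp = from_timer(lp, t, timer);

		iss_net_poll(lp);
		mod_timer(&lp->timer, jiffies + lp->timer_val);	/* re-arm */
	}

	/* one-time setup, e.g. in ndo_open: */
	timer_setup(&lp->timer, poll_timer_fn, 0);
	mod_timer(&lp->timer, jiffies + lp->timer_val);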
@@ -363,11 +356,9 @@ static int iss_net_open(struct net_device *dev)
 	struct iss_net_private *lp = netdev_priv(dev);
 	int err;
 
-	spin_lock_bh(&lp->lock);
-
-	err = lp->tp.open(lp);
+	err = lp->tp.net_ops->open(lp);
 	if (err < 0)
-		goto out;
+		return err;
 
 	netif_start_queue(dev);
 
@@ -378,36 +369,21 @@ static int iss_net_open(struct net_device *dev)
 	while ((err = iss_net_rx(dev)) > 0)
 		;
 
-	spin_unlock_bh(&lp->lock);
-	spin_lock_bh(&opened_lock);
-	list_add(&lp->opened_list, &opened);
-	spin_unlock_bh(&opened_lock);
-	spin_lock_bh(&lp->lock);
-
 	timer_setup(&lp->timer, iss_net_timer, 0);
 	lp->timer_val = ISS_NET_TIMER_VALUE;
 	mod_timer(&lp->timer, jiffies + lp->timer_val);
 
-out:
-	spin_unlock_bh(&lp->lock);
 	return err;
 }
 
 static int iss_net_close(struct net_device *dev)
 {
 	struct iss_net_private *lp = netdev_priv(dev);
-	netif_stop_queue(dev);
-	spin_lock_bh(&lp->lock);
-
-	spin_lock(&opened_lock);
-	list_del(&opened);
-	spin_unlock(&opened_lock);
 
+	netif_stop_queue(dev);
 	del_timer_sync(&lp->timer);
+	lp->tp.net_ops->close(lp);
 
-	lp->tp.close(lp);
-
-	spin_unlock_bh(&lp->lock);
 	return 0;
 }
 
@@ -417,13 +393,14 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int len;
 
 	netif_stop_queue(dev);
-	spin_lock_bh(&lp->lock);
 
-	len = lp->tp.write(lp, &skb);
+	len = lp->tp.net_ops->write(lp, &skb);
 
 	if (len == skb->len) {
+		spin_lock_bh(&lp->lock);
 		lp->stats.tx_packets++;
 		lp->stats.tx_bytes += skb->len;
+		spin_unlock_bh(&lp->lock);
 		netif_trans_update(dev);
 		netif_start_queue(dev);
 
@@ -432,24 +409,29 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else if (len == 0) {
 		netif_start_queue(dev);
+		spin_lock_bh(&lp->lock);
 		lp->stats.tx_dropped++;
+		spin_unlock_bh(&lp->lock);
 
 	} else {
 		netif_start_queue(dev);
 		pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
 	}
 
-	spin_unlock_bh(&lp->lock);
-
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
+static void iss_net_get_stats64(struct net_device *dev,
+				struct rtnl_link_stats64 *stats)
 {
 	struct iss_net_private *lp = netdev_priv(dev);
-	return &lp->stats;
+
+	spin_lock_bh(&lp->lock);
+	*stats = lp->stats;
+	spin_unlock_bh(&lp->lock);
 }
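The .ndo_get_stats64 contract differs from the old .ndo_get_stats one: instead of returning a pointer into the device's private data, the driver copies a snapshot into caller-provided storage, so updaters and the reader must share a lock. That is why the rx/tx paths above grew spin_lock_bh() around the counter bumps. On the consumer side the core fills a local struct via dev_get_stats(), which calls ndo_get_stats64 when the driver provides it; a fragment (assuming a valid net_device pointer dev in scope):

	struct rtnl_link_stats64 st;

	dev_get_stats(dev, &st);	/* snapshot, safe to use after return */
	pr_info("%s: rx %llu tx %llu\n", dev->name, st.rx_packets, st.tx_packets);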
 static void iss_net_set_multicast_list(struct net_device *dev)
@@ -460,19 +442,6 @@ static void iss_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 }
 
-static int iss_net_set_mac(struct net_device *dev, void *addr)
-{
-	struct iss_net_private *lp = netdev_priv(dev);
-	struct sockaddr *hwaddr = addr;
-
-	if (!is_valid_ether_addr(hwaddr->sa_data))
-		return -EADDRNOTAVAIL;
-	spin_lock_bh(&lp->lock);
-	eth_hw_addr_set(dev, hwaddr->sa_data);
-	spin_unlock_bh(&lp->lock);
-	return 0;
-}
-
 static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
 {
 	return -EINVAL;
@@ -494,11 +463,11 @@ static int driver_registered;
 static const struct net_device_ops iss_netdev_ops = {
 	.ndo_open = iss_net_open,
 	.ndo_stop = iss_net_close,
-	.ndo_get_stats = iss_net_get_stats,
+	.ndo_get_stats64 = iss_net_get_stats64,
 	.ndo_start_xmit = iss_net_start_xmit,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_change_mtu = iss_net_change_mtu,
-	.ndo_set_mac_address = iss_net_set_mac,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_tx_timeout = iss_net_tx_timeout,
 	.ndo_set_rx_mode = iss_net_set_multicast_list,
 };
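Dropping iss_net_set_mac in favour of the generic eth_mac_addr() loses nothing: the helper performs the same validate-then-set sequence, and ndo_set_mac_address is already serialized under the RTNL lock, so the private spinlock the removed helper took was not protecting anything extra. Roughly what the generic helper does (a paraphrase, not the verbatim net/ethernet/eth.c code):

	static int set_mac_like_eth_mac_addr(struct net_device *dev, void *p)
	{
		struct sockaddr *addr = p;

		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;	/* same check as the removed code */
		eth_hw_addr_set(dev, addr->sa_data);
		return 0;
	}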
@@ -520,7 +489,6 @@ static int iss_net_configure(int index, char *init)
 	lp = netdev_priv(dev);
 	*lp = (struct iss_net_private) {
 		.device_list = LIST_HEAD_INIT(lp->device_list),
-		.opened_list = LIST_HEAD_INIT(lp->opened_list),
 		.dev = dev,
 		.index = index,
 	};
...
@@ -211,12 +211,18 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
 	struct simdisk *dev = pde_data(file_inode(file));
 	const char *s = dev->filename;
 	if (s) {
-		ssize_t n = simple_read_from_buffer(buf, size, ppos,
-						    s, strlen(s));
-		if (n < 0)
-			return n;
-		buf += n;
-		size -= n;
+		ssize_t len = strlen(s);
+		char *temp = kmalloc(len + 2, GFP_KERNEL);
+
+		if (!temp)
+			return -ENOMEM;
+
+		len = scnprintf(temp, len + 2, "%s\n", s);
+		len = simple_read_from_buffer(buf, size, ppos,
+					      temp, len);
+
+		kfree(temp);
+		return len;
 	}
 	return simple_read_from_buffer(buf, size, ppos, "\n", 1);
 }
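The old code chained two simple_read_from_buffer() calls over one shared *ppos, so the offset arithmetic for the trailing "\n" went wrong once the first call had advanced the position, and it returned only the second call's count. Building "filename\n" in a temporary buffer lets a single call do all the bookkeeping: simple_read_from_buffer() copies at most count bytes from from + *ppos (bounded by available), advances *ppos, and returns the byte count. One buffer per read is the safe pattern, e.g. (a hypothetical handler, not from this driver):

	static ssize_t hello_proc_read(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
	{
		static const char msg[] = "hello\n";

		/* one buffer, one call: partial and repeated reads stay consistent */
		return simple_read_from_buffer(buf, size, ppos, msg, sizeof(msg) - 1);
	}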
...
@@ -78,7 +78,7 @@ void __init platform_init(bp_tag_t *first)
 
 void platform_heartbeat(void)
 {
-	static int i=0, t = 0;
+	static int i, t;
 
 	if (--t < 0)
 	{
...
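The heartbeat hunk is purely cosmetic: objects with static storage duration are zero-initialized by the language, so the explicit "= 0" added nothing (and checkpatch flags it). That is:

	static int i;	/* guaranteed to start life as 0 (C11 6.7.9p10) */
	static int t;	/* same guarantee; the '= 0' was redundant */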