Commit 3f86ed6e authored by Linus Torvalds

Merge tag 'arc-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC updates from Vineet Gupta:

 - fixes for -Wmissing-prototypes warnings

 - missing compiler barrier in relaxed atomics

 - some uaccess simplification, declutter

 - removal of massive global struct cpuinfo_arc from bootlog code

 - __switch_to consolidation (removal of inline asm variant)

 - use GP to cache task pointer (vs. r25)

 - misc rework of entry code

* tag 'arc-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (24 commits)
  ARC: boot log: fix warning
  arc: Explicitly include correct DT includes
  ARC: pt_regs: create separate type for ecr
  ARCv2: entry: rearrange pt_regs slightly
  ARC: entry: replace 8 byte ADD.ne with 4 byte ADD2.ne
  ARC: entry: replace 8 byte OR with 4 byte BSET
  ARC: entry: Add more common chores to EXCEPTION_PROLOGUE
  ARC: entry: EV_MachineCheck don't re-read ECR
  ARC: entry: ARCompact EV_ProtV to use r10 directly
  ARC: entry: rework (non-functional)
  ARC: __switch_to: move ksp to thread_info from thread_struct
  ARC: __switch_to: asm with dwarf ops (vs. inline asm)
  ARC: kernel stack: INIT_THREAD need not setup @init_stack in @ksp
  ARC: entry: use gp to cache task pointer (vs. r25)
  ARC: boot log: eliminate struct cpuinfo_arc #4: boot log per ISA
  ARC: boot log: eliminate struct cpuinfo_arc #3: don't export
  ARC: boot log: eliminate struct cpuinfo_arc #2: cache
  ARC: boot log: eliminate struct cpuinfo_arc #1: mm
  ARCv2: memset: don't prefetch for len == 0 which happens a lot
  ARC: uaccess: elide unaligned handling if hardware supports
  ...
parents ea4f9c37 c40cad3b
......@@ -27,6 +27,8 @@ config ARC
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_IOREMAP
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
......@@ -491,11 +493,11 @@ config ARC_KVADDR_SIZE
kernel-user gutter)
config ARC_CURR_IN_REG
bool "Dedicate Register r25 for current_task pointer"
bool "cache current task pointer in gp"
default y
help
This reserved Register R25 to point to Current Task in
kernel mode. This saves memory access for each such access
This reserves the gp register to point to the Current Task in
kernel mode, eliding a memory access on each such access
config ARC_EMUL_UNALIGNED
......
......@@ -28,14 +28,14 @@ cflags-y += $(tune-mcpu-def-y)
endif
endif
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer-reported bug where some code built into the kernel was NOT using
# any kernel headers, and missing the r25 global register
# any kernel headers, and missing the global register
# Can't do unconditionally because of recursive include issues
# due to <linux/thread_info.h>
LINUXINCLUDE += -include $(srctree)/arch/arc/include/asm/current.h
cflags-y += -ffixed-gp
endif
cflags-y += -fsection-anchors
......@@ -67,7 +67,7 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
# small data is default for elf32 tool-chain. If not usable, disable it
# This also allows repurposing GP as scratch reg to gcc reg allocator
disable_small_data := y
cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(disable_small_data) += -mno-sdata
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
......
......@@ -23,7 +23,7 @@
#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
#define ARC_REG_MUL_BCR 0x7b
#define ARC_REG_MPY_BCR 0x7b
#define ARC_REG_SWAP_BCR 0x7c
#define ARC_REG_NORM_BCR 0x7d
#define ARC_REG_MIXMAX_BCR 0x7e
......@@ -177,7 +177,7 @@ struct bcr_isa_arcv2 {
#endif
};
struct bcr_uarch_build_arcv2 {
struct bcr_uarch_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, prod:8, maj:8, min:8;
#else
......@@ -185,6 +185,59 @@ struct bcr_uarch_build_arcv2 {
#endif
};
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
ways:4, ver:8;
#endif
};
struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
/* DTLB ITLB JES JE JA */
unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
};
struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
};
struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:24, way:2, lsz:2, sz:4;
#else
unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
};
struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
};
struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
};
struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
......@@ -302,48 +355,6 @@ struct bcr_generic {
#endif
};
/*
*******************************************************************
* Generic structures to hold build configuration used at runtime
*/
struct cpuinfo_arc_mmu {
unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
};
struct cpuinfo_arc_cache {
unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4;
};
struct cpuinfo_arc_bpu {
unsigned int ver, full, num_cache, num_pred, ret_stk;
};
struct cpuinfo_arc_ccm {
unsigned int base_addr, sz;
};
struct cpuinfo_arc {
struct cpuinfo_arc_cache icache, dcache, slc;
struct cpuinfo_arc_mmu mmu;
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa_arcv2 isa;
const char *release, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct bcr_mpy extn_mpy;
};
extern struct cpuinfo_arc cpuinfo_arc700[];
static inline int is_isa_arcv2(void)
{
return IS_ENABLED(CONFIG_ISA_ARCV2);
......
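The bcr_* bitfield layouts above migrate into this header from private copies in mm/cache.c and mm/tlb.c (visible in the removals later in this diff) so that decode sites can share them. A minimal sketch of the decode pattern, using the existing READ_BCR() helper from this header and the size formulas applied later in this series (field values illustrative only):

	struct bcr_cache ibcr;

	READ_BCR(ARC_REG_IC_BCR, ibcr);
	if (ibcr.ver)	/* ver == 0 means no I-cache present */
		pr_info("I-Cache: %uK, %uB line\n",
			1U << (ibcr.sz - 1),	/* sz encodes log2 of KB */
			8U << ibcr.line_len);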
......@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
......@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
: [val] "=&r" (val) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
\
return val; \
}
......@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
\
return orig; \
}
......
......@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); \
: "cc", "memory"); \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
......@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: [val] "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); /* memory clobber comes from smp_mb() */ \
: "cc", "memory"); \
\
return val; \
}
......@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(orig), "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); /* memory clobber comes from smp_mb() */ \
: "cc", "memory"); \
\
return orig; \
}
......
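The "memory" clobber added throughout these atomics is a compiler barrier, not a hardware fence: even the _relaxed variants modify v->counter through the asm, but the constraints only pass a register holding &v->counter (llock/scond cannot take a memory operand), so without the clobber the compiler was free to cache the counter in a register or move surrounding accesses across the asm. A minimal sketch of the corrected pattern, mirroring the llock/scond loop above:

	static inline void arch_atomic_add_sketch(int i, atomic_t *v)
	{
		unsigned int val;

		__asm__ __volatile__(
		"1:	llock   %[val], [%[ctr]]	\n"
		"	add     %[val], %[val], %[i]	\n"
		"	scond   %[val], [%[ctr]]	\n"
		"	bnz     1b			\n"
		: [val] "=&r" (val)
		: [ctr] "r" (&v->counter),
		  [i] "ir" (i)
		: "cc", "memory");	/* compiler barrier, not a HW fence */
	}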
......@@ -13,7 +13,7 @@
#ifdef CONFIG_ARC_CURR_IN_REG
register struct task_struct *curr_arc asm("r25");
register struct task_struct *curr_arc asm("gp");
#define current (curr_arc)
#else
......
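For reference, the mechanism current.h relies on here is a file-scope global register variable: it pins the task pointer to a fixed register, so every "current" use is a plain register read instead of a load from the backing _current_task[]. This only works because the Makefile change above reserves the register from the allocator; a sketch, assuming the kernel is built with -ffixed-gp:

	/* gcc never allocates gp, so this binding survives function calls */
	register struct task_struct *curr_arc asm("gp");
	#define current (curr_arc)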
......@@ -10,23 +10,31 @@
#ifdef ARC_DW2_UNWIND_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_REGISTER .cfi_register
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_UNDEFINED .cfi_undefined
#else
#define CFI_IGNORE #
#define CFI_STARTPROC CFI_IGNORE
#define CFI_ENDPROC CFI_IGNORE
#define CFI_DEF_CFA CFI_IGNORE
#define CFI_REGISTER CFI_IGNORE
#define CFI_REL_OFFSET CFI_IGNORE
#define CFI_UNDEFINED CFI_IGNORE
#define CFI_STARTPROC CFI_IGNORE
#define CFI_ENDPROC CFI_IGNORE
#define CFI_DEF_CFA CFI_IGNORE
#define CFI_DEF_CFA_OFFSET CFI_IGNORE
#define CFI_DEF_CFA_REGISTER CFI_IGNORE
#define CFI_OFFSET CFI_IGNORE
#define CFI_REL_OFFSET CFI_IGNORE
#define CFI_REGISTER CFI_IGNORE
#define CFI_RESTORE CFI_IGNORE
#define CFI_UNDEFINED CFI_IGNORE
#endif /* !ARC_DW2_UNWIND_AS_CFI */
......
......@@ -18,7 +18,6 @@
* | orig_r0 |
* | event/ECR |
* | bta |
* | user_r25 |
* | gp |
* | fp |
* | sp |
......@@ -49,14 +48,18 @@
/*------------------------------------------------------------------------*/
.macro INTERRUPT_PROLOGUE
; (A) Before jumping to Interrupt Vector, hardware micro-ops did following:
; Before jumping to Interrupt Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0)
; 3. Auto save: (mandatory) Push PC and STAT32 on stack
; hardware does this even if CONFIG_ARC_IRQ_NO_AUTOSAVE
; 4. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
; 4a. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
;
; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair
; Now
; 4b. If Auto-save (optional) not enabled in hw, manually save them
; 5. Manually save: r12,r30, sp,fp,gp, ACCL pair
;
; At the end, SP points to pt_regs
#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
; carve pt_regs on stack (case #3), PC/STAT32 already on stack
......@@ -72,15 +75,16 @@
.endm
/*------------------------------------------------------------------------*/
.macro EXCEPTION_PROLOGUE
.macro EXCEPTION_PROLOGUE_KEEP_AE
; (A) Before jumping to Exception Vector, hardware micro-ops did following:
; Before jumping to Exception Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0)
;
; (B) Manually save the complete reg file below
; Now manually save rest of reg file
; At the end, SP points to pt_regs
sub sp, sp, SZ_PT_REGS ; carve pt_regs
sub sp, sp, SZ_PT_REGS ; carve space for pt_regs
; _HARD saves r10 clobbered by _SOFT as scratch hence comes first
......@@ -100,6 +104,16 @@
; OUTPUT: r10 has ECR expected by EV_Trap
.endm
.macro EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN ; clobbers r9
.endm
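; (the common lr-efa / mov-sp / FAKE_RET chores move here from each
;  handler; the _KEEP_AE variant exists for handlers such as
;  EV_MachineCheck that must run with STATUS32.AE still set)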
/*------------------------------------------------------------------------
* This macro saves the registers manually which would normally be autosaved
* by hardware on taken interrupts. It is used by
......@@ -135,10 +149,10 @@
*/
.macro __SAVE_REGFILE_SOFT
ST2 gp, fp, PT_r26 ; gp (r26), fp (r27)
st r12, [sp, PT_sp + 4]
st r30, [sp, PT_sp + 8]
st fp, [sp, PT_fp] ; r27
st r30, [sp, PT_r30]
st r12, [sp, PT_r12]
st r26, [sp, PT_r26] ; gp
; Saving pt_regs->sp correctly requires some extra work due to the way
; Auto stack switch works
......@@ -153,30 +167,30 @@
; ISA requires ADD.nz to have same dest and src reg operands
mov.nz r10, sp
add.nz r10, r10, SZ_PT_REGS ; K mode SP
add2.nz r10, r10, SZ_PT_REGS/4 ; K mode SP
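; (add2 adds its last operand scaled by 4, so SZ_PT_REGS/4 fits the
;  short-immediate encoding: a 4-byte instruction where the plain ADD
;  it replaces needed an 8-byte limm)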
st r10, [sp, PT_sp] ; SP (pt_regs->sp)
#ifdef CONFIG_ARC_CURR_IN_REG
st r25, [sp, PT_user_r25]
GET_CURR_TASK_ON_CPU r25
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
ST2 r58, r59, PT_r58
#endif
/* clobbers r10, r11 registers pair */
DSP_SAVE_REGFILE_IRQ
#ifdef CONFIG_ARC_CURR_IN_REG
GET_CURR_TASK_ON_CPU gp
#endif
.endm
/*------------------------------------------------------------------------*/
.macro __RESTORE_REGFILE_SOFT
LD2 gp, fp, PT_r26 ; gp (r26), fp (r27)
ld r12, [sp, PT_r12]
ld fp, [sp, PT_fp]
ld r30, [sp, PT_r30]
ld r12, [sp, PT_r12]
ld r26, [sp, PT_r26]
; Restore SP (into AUX_USER_SP) only if returning to U mode
; - for K mode, it will be implicitly restored as stack is unwound
......@@ -188,10 +202,6 @@
sr r10, [AUX_USER_SP]
1:
#ifdef CONFIG_ARC_CURR_IN_REG
ld r25, [sp, PT_user_r25]
#endif
/* clobbers r10, r11 registers pair */
DSP_RESTORE_REGFILE_IRQ
......@@ -249,7 +259,7 @@
btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP
ld r10, [sp, PT_event + 4]
ld r10, [sp, PT_bta]
sr r10, [erbta]
LD2 r10, r11, PT_ret
......@@ -264,8 +274,8 @@
.macro FAKE_RET_FROM_EXCPN
lr r9, [status32]
bic r9, r9, STATUS_AE_MASK
or r9, r9, STATUS_IE_MASK
bclr r9, r9, STATUS_AE_BIT
bset r9, r9, STATUS_IE_BIT
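; (bclr/bset encode a bit number, which fits the 4-byte instruction
;  format; the bic/or pair they replace each needed a 32-bit mask limm,
;  i.e. 8 bytes per instruction)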
kflag r9
.endm
......
......@@ -140,7 +140,7 @@
*
* After this it is safe to call the "C" handlers
*-------------------------------------------------------------*/
.macro EXCEPTION_PROLOGUE
.macro EXCEPTION_PROLOGUE_KEEP_AE
/* Need at least 1 reg to code the early exception prologue */
PROLOG_FREEUP_REG r9, @ex_saved_reg1
......@@ -151,14 +151,6 @@
/* ARC700 doesn't provide auto-stack switching */
SWITCH_TO_KERNEL_STK
#ifdef CONFIG_ARC_CURR_IN_REG
/* Treat r25 as scratch reg (save on stack) and load with "current" */
PUSH r25
GET_CURR_TASK_ON_CPU r25
#else
sub sp, sp, 4
#endif
st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */
sub sp, sp, 4 /* skip pt_regs->sp, already saved above */
......@@ -178,7 +170,23 @@
PUSHAX erbta
lr r10, [ecr]
st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */
st r10, [sp, PT_event]
#ifdef CONFIG_ARC_CURR_IN_REG
/* gp already saved on stack: now load with "current" */
GET_CURR_TASK_ON_CPU gp
#endif
; OUTPUT: r10 has ECR expected by EV_Trap
.endm
.macro EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN ; clobbers r9
.endm
/*--------------------------------------------------------------
......@@ -208,11 +216,8 @@
POP gp
RESTORE_R12_TO_R0
#ifdef CONFIG_ARC_CURR_IN_REG
ld r25, [sp, 12]
#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
/* orig_r0, ECR skipped automatically */
.endm
/* Dummy ECR values for Interrupts */
......@@ -229,13 +234,6 @@
SWITCH_TO_KERNEL_STK
#ifdef CONFIG_ARC_CURR_IN_REG
/* Treat r25 as scratch reg (save on stack) and load with "current" */
PUSH r25
GET_CURR_TASK_ON_CPU r25
#else
sub sp, sp, 4
#endif
PUSH 0x003\LVL\()abcd /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
......@@ -255,6 +253,10 @@
PUSHAX lp_start
PUSHAX bta_l\LVL\()
#ifdef CONFIG_ARC_CURR_IN_REG
/* gp already saved on stack: now load with "current" */
GET_CURR_TASK_ON_CPU gp
#endif
.endm
/*--------------------------------------------------------------
......@@ -282,11 +284,7 @@
POP gp
RESTORE_R12_TO_R0
#ifdef CONFIG_ARC_CURR_IN_REG
ld r25, [sp, 12]
#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
ld sp, [sp] /* restore original sp; orig_r0, ECR skipped implicitly */
.endm
/* Get thread_info of "current" tsk */
......
......@@ -13,6 +13,8 @@
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/mmu.h>
#ifdef __ASSEMBLY__
#ifdef CONFIG_ISA_ARCOMPACT
#include <asm/entry-compact.h> /* ISA specific bits */
#else
......@@ -89,7 +91,7 @@
* Helpers to save/restore callee-saved regs:
* used by several macros below
*-------------------------------------------------------------*/
.macro SAVE_R13_TO_R24
.macro SAVE_R13_TO_R25
PUSH r13
PUSH r14
PUSH r15
......@@ -102,9 +104,11 @@
PUSH r22
PUSH r23
PUSH r24
PUSH r25
.endm
.macro RESTORE_R24_TO_R13
.macro RESTORE_R25_TO_R13
POP r25
POP r24
POP r23
POP r22
......@@ -119,81 +123,31 @@
POP r13
.endm
/*--------------------------------------------------------------
* Collect User Mode callee regs as struct callee_regs - needed by
* fork/do_signal/unaligned-access-emulation.
* (By default only scratch regs are saved on entry to kernel)
*
* Special handling for r25 if used for caching Task Pointer.
* It would have been saved in task->thread.user_r25 already, but to keep
* the interface same it is copied into regular r25 placeholder in
* struct callee_regs.
*-------------------------------------------------------------*/
/*
* save user mode callee regs as struct callee_regs
* - needed by fork/do_signal/unaligned-access-emulation.
*/
.macro SAVE_CALLEE_SAVED_USER
SAVE_R13_TO_R25
.endm
mov r12, sp ; save SP as ref to pt_regs
SAVE_R13_TO_R24
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it with rest of callee_regs
ld r12, [r12, PT_user_r25]
PUSH r12
#else
PUSH r25
#endif
/*
* restore user mode callee regs as struct callee_regs
* - could have been changed by ptrace tracer or unaligned-access fixup
*/
.macro RESTORE_CALLEE_SAVED_USER
RESTORE_R25_TO_R13
.endm
/*--------------------------------------------------------------
* Save kernel Mode callee regs at the time of Context Switch.
*
* Special handling for r25 if used for caching Task Pointer.
* Kernel simply skips saving it since it will be loaded with
* incoming task pointer anyways
*-------------------------------------------------------------*/
/*
* save/restore kernel mode callee regs at the time of context switch
*/
.macro SAVE_CALLEE_SAVED_KERNEL
SAVE_R13_TO_R24
#ifdef CONFIG_ARC_CURR_IN_REG
sub sp, sp, 4
#else
PUSH r25
#endif
SAVE_R13_TO_R25
.endm
/*--------------------------------------------------------------
* Opposite of SAVE_CALLEE_SAVED_KERNEL
*-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL
#ifdef CONFIG_ARC_CURR_IN_REG
add sp, sp, 4 /* skip usual r25 placeholder */
#else
POP r25
#endif
RESTORE_R24_TO_R13
.endm
/*--------------------------------------------------------------
* Opposite of SAVE_CALLEE_SAVED_USER
*
* ptrace tracer or unaligned-access fixup might have changed a user mode
* callee reg which is saved back to usual r25 storage location
*-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER
#ifdef CONFIG_ARC_CURR_IN_REG
POP r12
#else
POP r25
#endif
RESTORE_R24_TO_R13
; SP is back to start of pt_regs
#ifdef CONFIG_ARC_CURR_IN_REG
st r12, [sp, PT_user_r25]
#endif
RESTORE_R25_TO_R13
.endm
/*--------------------------------------------------------------
......@@ -229,10 +183,10 @@
#ifdef CONFIG_SMP
/*-------------------------------------------------
/*
* Retrieve the current running task on this CPU
* 1. Determine curr CPU id.
* 2. Use it to index into _current_task[ ]
* - loads it from backing _current_task[] (and can't use the
* caching reg for current task)
*/
.macro GET_CURR_TASK_ON_CPU reg
GET_CPU_ID \reg
......@@ -254,7 +208,7 @@
add2 \tmp, @_current_task, \tmp
st \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
mov r25, \tsk
mov gp, \tsk
#endif
.endm
......@@ -269,21 +223,20 @@
.macro SET_CURR_TASK_ON_CPU tsk, tmp
st \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
mov r25, \tsk
mov gp, \tsk
#endif
.endm
#endif /* SMP / UNI */
/* ------------------------------------------------------------------
/*
* Get the ptr to some field of Current Task at @off in task struct
* -Uses r25 for Current task ptr if that is enabled
* - Uses current task cached in reg if enabled
*/
#ifdef CONFIG_ARC_CURR_IN_REG
.macro GET_CURR_TASK_FIELD_PTR off, reg
add \reg, r25, \off
add \reg, gp, \off
.endm
#else
......@@ -295,4 +248,23 @@
#endif /* CONFIG_ARC_CURR_IN_REG */
#else /* !__ASSEMBLY__ */
extern void do_signal(struct pt_regs *);
extern void do_notify_resume(struct pt_regs *);
extern int do_privilege_fault(unsigned long, struct pt_regs *);
extern int do_extension_fault(unsigned long, struct pt_regs *);
extern int insterror_is_error(unsigned long, struct pt_regs *);
extern int do_memory_error(unsigned long, struct pt_regs *);
extern int trap_is_brkpt(unsigned long, struct pt_regs *);
extern int do_misaligned_error(unsigned long, struct pt_regs *);
extern int do_trap5_error(unsigned long, struct pt_regs *);
extern int do_misaligned_access(unsigned long, struct pt_regs *, struct callee_regs *);
extern void do_machine_check_fault(unsigned long, struct pt_regs *);
extern void do_non_swi_trap(unsigned long, struct pt_regs *);
extern void do_insterror_or_kprobe(unsigned long, struct pt_regs *);
extern void do_page_fault(unsigned long, struct pt_regs *);
#endif
#endif /* __ASM_ARC_ENTRY_H */
......@@ -25,5 +25,6 @@
#include <asm-generic/irq.h>
extern void arc_init_IRQ(void);
extern void arch_do_IRQ(unsigned int, struct pt_regs *);
#endif
......@@ -14,6 +14,8 @@ typedef struct {
unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *);
#endif
#include <asm/mmu-arcv2.h>
......
......@@ -22,7 +22,6 @@
* struct thread_info
*/
struct thread_struct {
unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
......@@ -33,9 +32,7 @@ struct thread_struct {
#endif
};
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
}
#define INIT_THREAD { }
/* Forward declaration, a strange C thing */
struct task_struct;
......@@ -56,7 +53,7 @@ struct task_struct;
* Where about of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)
#define TSK_K_ESP(tsk) (task_thread_info(tsk)->ksp)
#define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \
sizeof(struct callee_regs) + off)))
......
......@@ -12,6 +12,17 @@
#ifndef __ASSEMBLY__
typedef union {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned long state:8, vec:8, cause:8, param:8;
#else
unsigned long param:8, cause:8, vec:8, state:8;
#endif
};
unsigned long full;
} ecr_reg;
/* THE pt_regs: Defines how regs are saved during entry into kernel */
#ifdef CONFIG_ISA_ARCOMPACT
......@@ -40,23 +51,10 @@ struct pt_regs {
* Last word used by Linux for extra state mgmt (syscall-restart)
* For interrupts, use artificial ECR values to note current prio-level
*/
union {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned long state:8, ecr_vec:8,
ecr_cause:8, ecr_param:8;
#else
unsigned long ecr_param:8, ecr_cause:8,
ecr_vec:8, state:8;
#endif
};
unsigned long event;
};
unsigned long user_r25;
ecr_reg ecr;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, user_r25)
#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
#else
......@@ -64,28 +62,14 @@ struct pt_regs {
unsigned long orig_r0;
union {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned long state:8, ecr_vec:8,
ecr_cause:8, ecr_param:8;
#else
unsigned long ecr_param:8, ecr_cause:8,
ecr_vec:8, state:8;
#endif
};
unsigned long event;
};
unsigned long bta; /* bta_l1, bta_l2, erbta */
ecr_reg ecr; /* Exception Cause Reg */
unsigned long user_r25;
unsigned long bta; /* erbta */
unsigned long r26; /* gp */
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
unsigned long r12, r30;
unsigned long r30;
unsigned long r12;
unsigned long r26; /* gp */
#ifdef CONFIG_ARC_HAS_ACCL_REGS
unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
......@@ -94,6 +78,8 @@ struct pt_regs {
unsigned long DSP_CTRL;
#endif
unsigned long sp; /* user/kernel sp depending on entry */
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
......@@ -134,13 +120,13 @@ struct callee_regs {
/* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
#define in_syscall(regs) ((regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param)
#define in_brkpt_trap(regs) ((regs->ecr.vec == ECR_V_TRAP) && regs->ecr.param)
#define STATE_SCALL_RESTARTED 0x01
#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
#define syscall_wont_restart(regs) (regs->ecr.state |= STATE_SCALL_RESTARTED)
#define syscall_restartable(regs) !(regs->ecr.state & STATE_SCALL_RESTARTED)
#define current_pt_regs() \
({ \
......@@ -181,6 +167,9 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
extern int syscall_trace_entry(struct pt_regs *);
extern void syscall_trace_exit(struct pt_regs *);
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PTRACE_H */
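The typed ecr_reg union above replaces the open-coded ecr_vec/ecr_cause/ecr_param bitfields that were duplicated in both pt_regs variants. An illustrative decode using the new view, mirroring the in_brkpt_trap() macro (ECR_V_TRAP is defined elsewhere in arch/arc):

	static inline bool is_brkpt_trap_sketch(struct pt_regs *regs)
	{
		/* one 32-bit ECR word, addressed by named fields */
		return regs->ecr.vec == ECR_V_TRAP && regs->ecr.param;
	}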
......@@ -35,11 +35,11 @@ long __init arc_get_mem_sz(void);
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
extern void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_mmu_bcr(void);
extern int arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
extern int arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void __init handle_uboot_args(void);
#endif /* __ASMARC_SETUP_H */
......@@ -29,6 +29,8 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void __init smp_init_cpus(void);
extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
extern void arc_platform_smp_wait_to_boot(int);
extern void start_kernel_secondary(void);
/*
* API expected BY platform smp code (FROM arch smp code)
......
......@@ -37,16 +37,16 @@
*/
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct task_struct *task; /* main task structure */
__u32 cpu; /* current CPU */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */
};
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
* initilaize thread_info for any @tsk
* - this is not related to init_task per se
*/
#define INIT_THREAD_INFO(tsk) \
{ \
......
......@@ -146,8 +146,9 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
if (n == 0)
return 0;
/* unaligned */
if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
/* fallback for unaligned access when hardware doesn't support */
if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
(((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
......@@ -373,8 +374,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
if (n == 0)
return 0;
/* unaligned */
if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
/* fallback for unaligned access when hardware doesn't support */
if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
(((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
......@@ -584,7 +586,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return res;
}
static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
long res = n;
unsigned char *d_char = to;
......@@ -626,17 +628,10 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
return res;
}
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
#define __clear_user(d, n) __arc_clear_user(d, n)
#else
extern unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n);
#define __clear_user(d, n) arc_clear_user_noinline(d, n)
#endif
#define __clear_user __clear_user
#include <asm-generic/uaccess.h>
......
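The IS_ENABLED() guard is what lets the unaligned byte-loop fall away at compile time: IS_ENABLED(CONFIG_...) folds to a constant 0 or 1, so with CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS=y the whole fallback branch is dead code and the copy goes straight to the word loop. A reduced sketch of the pattern:

	#include <linux/kconfig.h>

	static inline int needs_byte_loop(const void *p)
	{
		/* constant-folds to 0 when the hardware handles unaligned
		 * accesses, eliminating the test and everything behind it */
		return !IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
		       ((unsigned long)p & 0x3);
	}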
......@@ -5,6 +5,8 @@
obj-y := head.o arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o
obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
obj-y += ctx_sw_asm.o
obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o
obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o
......@@ -24,11 +26,4 @@ ifdef CONFIG_ISA_ARCOMPACT
CFLAGS_fpu.o += -mdpfp
endif
ifdef CONFIG_ARC_DW2_UNWIND
CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
obj-y += ctx_sw.o
else
obj-y += ctx_sw_asm.o
endif
extra-y := vmlinux.lds
......@@ -20,13 +20,13 @@ int main(void)
BLANK();
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
DEFINE(THREAD_FAULT_ADDR,
offsetof(struct thread_struct, fault_address));
BLANK();
DEFINE(THREAD_INFO_KSP, offsetof(struct thread_info, ksp));
DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
DEFINE(THREAD_INFO_PREEMPT_COUNT,
offsetof(struct thread_info, preempt_count));
......@@ -46,7 +46,8 @@ int main(void)
BLANK();
DEFINE(PT_status32, offsetof(struct pt_regs, status32));
DEFINE(PT_event, offsetof(struct pt_regs, event));
DEFINE(PT_event, offsetof(struct pt_regs, ecr));
DEFINE(PT_bta, offsetof(struct pt_regs, bta));
DEFINE(PT_sp, offsetof(struct pt_regs, sp));
DEFINE(PT_r0, offsetof(struct pt_regs, r0));
DEFINE(PT_r1, offsetof(struct pt_regs, r1));
......@@ -61,13 +62,9 @@ int main(void)
DEFINE(PT_r26, offsetof(struct pt_regs, r26));
DEFINE(PT_ret, offsetof(struct pt_regs, ret));
DEFINE(PT_blink, offsetof(struct pt_regs, blink));
OFFSET(PT_fp, pt_regs, fp);
DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end));
DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count));
DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
#ifdef CONFIG_ISA_ARCV2
OFFSET(PT_r12, pt_regs, r12);
OFFSET(PT_r30, pt_regs, r30);
......@@ -80,5 +77,8 @@ int main(void)
OFFSET(PT_DSP_CTRL, pt_regs, DSP_CTRL);
#endif
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Vineetg: Aug 2009
* -"C" version of lowest level context switch asm macro called by schedular
* gcc doesn't generate the dward CFI info for hand written asm, hence can't
* backtrace out of it (e.g. tasks sleeping in kernel).
* So we cheat a bit by writing almost similar code in inline-asm.
* -This is a hacky way of doing things, but there is no other simple way.
* I don't want/intend to extend unwinding code to understand raw asm
*/
#include <asm/asm-offsets.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
struct task_struct *__sched
__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
{
unsigned int tmp;
unsigned int prev = (unsigned int)prev_task;
unsigned int next = (unsigned int)next_task;
__asm__ __volatile__(
/* FP/BLINK save generated by gcc (standard function prologue) */
"st.a r13, [sp, -4] \n\t"
"st.a r14, [sp, -4] \n\t"
"st.a r15, [sp, -4] \n\t"
"st.a r16, [sp, -4] \n\t"
"st.a r17, [sp, -4] \n\t"
"st.a r18, [sp, -4] \n\t"
"st.a r19, [sp, -4] \n\t"
"st.a r20, [sp, -4] \n\t"
"st.a r21, [sp, -4] \n\t"
"st.a r22, [sp, -4] \n\t"
"st.a r23, [sp, -4] \n\t"
"st.a r24, [sp, -4] \n\t"
#ifndef CONFIG_ARC_CURR_IN_REG
"st.a r25, [sp, -4] \n\t"
#else
"sub sp, sp, 4 \n\t" /* usual r25 placeholder */
#endif
/* set ksp of outgoing task in tsk->thread.ksp */
#if KSP_WORD_OFF <= 255
"st.as sp, [%3, %1] \n\t"
#else
/*
* Workaround for NR_CPUS=4k
* %1 is bigger than 255 (S9 offset for st.as)
*/
"add2 r24, %3, %1 \n\t"
"st sp, [r24] \n\t"
#endif
/*
* setup _current_task with incoming tsk.
* optionally, set r25 to that as well
* For SMP extra work to get to &_current_task[cpu]
* (open coded SET_CURR_TASK_ON_CPU)
*/
#ifndef CONFIG_SMP
"st %2, [@_current_task] \n\t"
#else
"lr r24, [identity] \n\t"
"lsr r24, r24, 8 \n\t"
"bmsk r24, r24, 7 \n\t"
"add2 r24, @_current_task, r24 \n\t"
"st %2, [r24] \n\t"
#endif
#ifdef CONFIG_ARC_CURR_IN_REG
"mov r25, %2 \n\t"
#endif
/* get ksp of incoming task from tsk->thread.ksp */
"ld.as sp, [%2, %1] \n\t"
/* start loading its CALLEE reg file */
#ifndef CONFIG_ARC_CURR_IN_REG
"ld.ab r25, [sp, 4] \n\t"
#else
"add sp, sp, 4 \n\t"
#endif
"ld.ab r24, [sp, 4] \n\t"
"ld.ab r23, [sp, 4] \n\t"
"ld.ab r22, [sp, 4] \n\t"
"ld.ab r21, [sp, 4] \n\t"
"ld.ab r20, [sp, 4] \n\t"
"ld.ab r19, [sp, 4] \n\t"
"ld.ab r18, [sp, 4] \n\t"
"ld.ab r17, [sp, 4] \n\t"
"ld.ab r16, [sp, 4] \n\t"
"ld.ab r15, [sp, 4] \n\t"
"ld.ab r14, [sp, 4] \n\t"
"ld.ab r13, [sp, 4] \n\t"
/* last (ret value) = prev : although for ARC it mov r0, r0 */
"mov %0, %3 \n\t"
/* FP/BLINK restore generated by gcc (standard func epilogue) */
: "=r"(tmp)
: "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
: "blink"
);
return (struct task_struct *)tmp;
}
......@@ -11,50 +11,54 @@
#include <asm/entry.h> /* For the SAVE_* macros */
#include <asm/asm-offsets.h>
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
;################### Low Level Context Switch ##########################
; IN
; - r0: prev task (also current)
; - r1: next task
; OUT
; - r0: prev task (so r0 not touched)
.section .sched.text,"ax",@progbits
.align 4
.global __switch_to
.type __switch_to, @function
__switch_to:
CFI_STARTPROC
/* Save regs on kernel mode stack of task */
st.a blink, [sp, -4]
st.a fp, [sp, -4]
SAVE_CALLEE_SAVED_KERNEL
ENTRY_CFI(__switch_to)
/* Save the now KSP in task->thread.ksp */
#if KSP_WORD_OFF <= 255
st.as sp, [r0, KSP_WORD_OFF]
#else
/* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */
add2 r24, r0, KSP_WORD_OFF
st sp, [r24]
#endif
/*
* Return last task in r0 (return reg)
* On ARC, Return reg = First Arg reg = r0.
* Since we already have last task in r0,
* don't need to do anything special to return it
*/
/* save kernel stack frame regs of @prev task */
push blink
CFI_DEF_CFA_OFFSET 4
CFI_OFFSET r31, -4
push fp
CFI_DEF_CFA_OFFSET 8
CFI_OFFSET r27, -8
mov fp, sp
CFI_DEF_CFA_REGISTER r27
/* kernel mode callee regs of @prev */
SAVE_CALLEE_SAVED_KERNEL
/*
* switch to new task, contained in r1
* Temp reg r3 is required to get the ptr to store val
* save final SP to @prev->thread_info.ksp
* @prev is "current" so thread_info derived from SP
*/
SET_CURR_TASK_ON_CPU r1, r3
GET_CURR_THR_INFO_FROM_SP r10
st sp, [r10, THREAD_INFO_KSP]
/* update @next in _current_task[] and GP register caching it */
SET_CURR_TASK_ON_CPU r1, r10
/* reload SP with kernel mode stack pointer in task->thread.ksp */
ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
/* load SP from @next->thread_info.ksp */
ld r10, [r1, TASK_THREAD_INFO]
ld sp, [r10, THREAD_INFO_KSP]
/* restore the registers */
/* restore callee regs, stack frame regs of @next */
RESTORE_CALLEE_SAVED_KERNEL
ld.ab fp, [sp, 4]
ld.ab blink, [sp, 4]
j [blink]
pop fp
CFI_RESTORE r27
CFI_DEF_CFA r28, 4
pop blink
CFI_RESTORE r31
CFI_DEF_CFA_OFFSET 0
j [blink]
END_CFI(__switch_to)
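Moving ksp into thread_info is what lets __switch_to save @prev's stack pointer without a task pointer in hand: thread_info sits at the base of the kernel stack, so GET_CURR_THR_INFO_FROM_SP merely masks SP. A C sketch of the address math, assuming the usual on-stack thread_info layout:

	/* kernel stack is THREAD_SIZE aligned; thread_info at its base */
	struct thread_info *ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	ti->ksp = sp;	/* what "st sp, [r10, THREAD_INFO_KSP]" stores */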
......@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/mach_desc.h>
#include <asm/serial.h>
#ifdef CONFIG_SERIAL_EARLYCON
......
......@@ -125,11 +125,6 @@ ENTRY(mem_service)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_memory_error
b ret_from_exception
END(mem_service)
......@@ -138,11 +133,6 @@ ENTRY(EV_Misaligned)
EXCEPTION_PROLOGUE
lr r0, [efa] ; Faulting Data address
mov r1, sp
FAKE_RET_FROM_EXCPN
SAVE_CALLEE_SAVED_USER
mov r2, sp ; callee_regs
......@@ -163,11 +153,6 @@ ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
lr r0, [efa] ; Faulting Data address
mov r1, sp ; pt_regs
FAKE_RET_FROM_EXCPN
mov blink, ret_from_exception
b do_page_fault
......
......@@ -254,18 +254,7 @@ END(handle_interrupt_level1)
ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
mov r2, r10 ; ECR set into r10 already
lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above)
; Exception auto-disables further Intr/exceptions.
; Re-enable them by pretending to return from exception
; (so rest of handler executes in pure K mode)
FAKE_RET_FROM_EXCPN
mov r1, sp ; Handle to pt_regs
EXCEPTION_PROLOGUE ; ECR returned in r10
;------ (5) Type of Protection Violation? ----------
;
......@@ -273,8 +262,7 @@ ENTRY(EV_TLBProtV)
; -Access Violation : 00_23_(00|01|02|03)_00
; x r w r+w
; -Unaligned Access : 00_23_04_00
;
bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
bbit1 r10, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
;========= (6a) Access Violation Processing ========
bl do_page_fault
......@@ -303,9 +291,6 @@ END(EV_TLBProtV)
ENTRY(call_do_page_fault)
EXCEPTION_PROLOGUE
lr r0, [efa] ; Faulting Data address
mov r1, sp
FAKE_RET_FROM_EXCPN
mov blink, ret_from_exception
b do_page_fault
......
......@@ -80,11 +80,6 @@ ENTRY(instr_service)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_insterror_or_kprobe
b ret_from_exception
END(instr_service)
......@@ -95,16 +90,15 @@ END(instr_service)
ENTRY(EV_MachineCheck)
EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE ; ECR returned in r10
lr r2, [ecr]
lr r0, [efa]
mov r1, sp
; MC exceptions disable MMU
ARC_MMU_REENABLE r3
lsr r3, r2, 8
lsr r3, r10, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
......@@ -129,11 +123,6 @@ ENTRY(EV_PrivilegeV)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_privilege_fault
b ret_from_exception
END(EV_PrivilegeV)
......@@ -145,11 +134,6 @@ ENTRY(EV_Extension)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_extension_fault
b ret_from_exception
END(EV_Extension)
......@@ -160,20 +144,19 @@ END(EV_Extension)
; syscall Tracing
; ---------------------------------------------
tracesys:
; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed
; safekeep EFA (r12) if syscall tracer wanted PC
; for traps, ERET is pre-commit so points to next-PC
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_entry
; PRE syscall trace hook
mov r0, sp ; pt_regs
bl @syscall_trace_enter
; Tracing code now returns the syscall num (orig or modif)
mov r8, r0
; Do the Sys Call as we normally would.
; Validate the Sys Call number
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
......@@ -190,37 +173,36 @@ tracesys:
ld r6, [sp, PT_r6]
ld r7, [sp, PT_r7]
ld.as r9, [sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
jl [r9]
tracesys_exit:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
st r0, [sp, PT_r0]
;POST Sys Call Ptrace Hook
; POST syscall trace hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_exit
b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
; we'd done before calling post hook above
; don't call ret_from_system_call as it saves r0, already done above
b ret_from_exception
; ---------------------------------------------
; Breakpoint TRAP
; ---------------------------------------------
trap_with_param:
mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
mov r1, sp
mov r1, sp ; pt_regs
; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12
; save callee regs in case tracer/gdb wants to peek
SAVE_CALLEE_SAVED_USER
; save location of saved Callee Regs @ thread_struct->pc
; safekeep ref to callee regs
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
; Call the trap handler
; call the non syscall trap handler
bl do_non_swi_trap
; unwind stack to discard Callee saved Regs
; unwind stack to discard callee regs
DISCARD_CALLEE_SAVED_USER
b ret_from_exception
......@@ -232,37 +214,33 @@ trap_with_param:
ENTRY(EV_Trap)
EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE
lr r12, [efa]
FAKE_RET_FROM_EXCPN
;============ TRAP 1 :breakpoints
; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
;============ TRAP N : breakpoints, kprobes etc
bmsk.f 0, r10, 7
bnz trap_with_param
;============ TRAP (no param): syscall top level
;============ TRAP 0 (no param): syscall
; If syscall tracing ongoing, invoke pre-post-hooks
; if syscall tracing ongoing, invoke pre-post-hooks around syscall
GET_CURR_THR_INFO_FLAGS r10
and.f 0, r10, _TIF_SYSCALL_WORK
bnz tracesys ; this never comes back
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
; Offset into the syscall_table and call handler
ld.as r9,[sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
jl [r9]
.Lret_from_system_call:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
; fall through to ret_from_exception
......@@ -318,7 +296,7 @@ resume_user_mode_begin:
; tracer might call PEEKUSR(CALLEE reg)
;
; NOTE: SP will grow up by size of CALLEE Reg-File
SAVE_CALLEE_SAVED_USER ; clobbers r12
SAVE_CALLEE_SAVED_USER
; save location of saved Callee Regs @ thread_struct->callee
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
......
......@@ -108,7 +108,7 @@ static void arcv2_irq_unmask(struct irq_data *data)
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
void arcv2_irq_enable(struct irq_data *data)
static void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
......
......@@ -175,7 +175,7 @@ void kgdb_trap(struct pt_regs *regs)
* with trap_s 4 (compiled) breakpoints, continuation needs to
* start after the breakpoint.
*/
if (regs->ecr_param == 3)
if (regs->ecr.param == 3)
instruction_pointer(regs) -= BREAK_INSTR_SIZE;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
......
......@@ -165,8 +165,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.idu, "IDU "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}
struct plat_smp_ops plat_smp_ops = {
......
......@@ -141,7 +141,7 @@ asmlinkage void ret_from_fork(void);
* | unused |
* | |
* ------------------
* | r25 | <==== top of Stack (thread.ksp)
* | r25 | <==== top of Stack (thread_info.ksp)
* ~ ~
* | --to-- | (CALLEE Regs of kernel mode)
* | r13 |
......@@ -162,7 +162,6 @@ asmlinkage void ret_from_fork(void);
* | SP |
* | orig_r0 |
* | event/ECR |
* | user_r25 |
* ------------------ <===== END of PAGE
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
......@@ -182,14 +181,14 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
c_callee = ((struct callee_regs *)childksp) - 1;
/*
* __switch_to() uses thread.ksp to start unwinding stack
* __switch_to() uses thread_info.ksp to start unwinding stack
* For kernel threads we don't need to create callee regs, the
* stack layout nevertheless needs to remain the same.
* Also, since __switch_to anyways unwinds callee regs, we use
* this to populate kernel thread entry-pt/args into callee regs,
* so that ret_from_kernel_thread() becomes simpler.
*/
p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
task_thread_info(p)->ksp = (unsigned long)c_callee; /* THREAD_INFO_KSP */
/* __switch_to expects FP(0), BLINK(return addr) at top */
childksp[0] = 0; /* fp */
......@@ -243,16 +242,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
*/
c_callee->r25 = task_thread_info(p)->thr_ptr;
#ifdef CONFIG_ARC_CURR_IN_REG
/*
* setup usermode thread pointer #2:
* however for this special use of r25 in kernel, __switch_to() sets
* r25 for kernel needs and only in the final return path is usermode
* r25 setup, from pt_regs->user_r25. So set that up as well
*/
c_regs->user_r25 = c_callee->r25;
#endif
return 0;
}
......
......@@ -46,8 +46,7 @@ static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(orig_r0),
REG_OFFSET_NAME(event),
REG_OFFSET_NAME(user_r25),
REG_OFFSET_NAME(ecr),
REG_OFFSET_END,
};
......@@ -55,9 +54,8 @@ static const struct pt_regs_offset regoffset_table[] = {
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(orig_r0),
REG_OFFSET_NAME(event),
REG_OFFSET_NAME(ecr),
REG_OFFSET_NAME(bta),
REG_OFFSET_NAME(user_r25),
REG_OFFSET_NAME(r26),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(sp),
......@@ -341,7 +339,7 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
asmlinkage int syscall_trace_entry(struct pt_regs *regs)
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (ptrace_report_syscall_entry(regs))
......
......@@ -53,6 +53,7 @@
#include <linux/sched/task_stack.h>
#include <asm/ucontext.h>
#include <asm/entry.h>
struct rt_sigframe {
struct siginfo info;
......
......@@ -23,9 +23,10 @@
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/processor.h>
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
......@@ -351,7 +352,7 @@ static inline int __do_IPI(unsigned long msg)
* arch-common ISR to handle for inter-processor interrupts
* Has hooks for platform specific IPI
*/
irqreturn_t do_IPI(int irq, void *dev_id)
static irqreturn_t do_IPI(int irq, void *dev_id)
{
unsigned long pending;
unsigned long __maybe_unused copy;
......
......@@ -29,6 +29,7 @@
#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
/*-------------------------------------------------------------------------
......
......@@ -16,6 +16,7 @@
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <asm/entry.h>
#include <asm/setup.h>
#include <asm/unaligned.h>
#include <asm/kprobes.h>
......@@ -109,9 +110,7 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
*/
void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
{
unsigned int param = regs->ecr_param;
switch (param) {
switch (regs->ecr.param) {
case 1:
trap_is_brkpt(address, regs);
break;
......
......@@ -115,8 +115,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
vec = regs->ecr_vec;
cause_code = regs->ecr_cause;
vec = regs->ecr.vec;
cause_code = regs->ecr.cause;
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
......@@ -154,7 +154,7 @@ static void show_ecr_verbose(struct pt_regs *regs)
pr_cont("Misaligned r/w from 0x%08lx\n", address);
#endif
} else if (vec == ECR_V_TRAP) {
if (regs->ecr_param == 5)
if (regs->ecr.param == 5)
pr_cont("gcc generated __builtin_trap\n");
} else {
pr_cont("Check Programmer's Manual\n");
......@@ -184,9 +184,10 @@ void show_regs(struct pt_regs *regs)
if (user_mode(regs))
show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\nSTAT: 0x%08lx",
regs->event, current->thread.fault_address, regs->ret,
regs->status32);
pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
regs->ecr.full, current->thread.fault_address, regs->ret);
pr_info("STAT32: 0x%08lx", regs->status32);
#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
......
......@@ -36,12 +36,13 @@
#endif
ENTRY_CFI(memset)
PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
;;; if length < 8
brls.d.nt r2, 8, .Lsmallchunk
mov.f lp_count,r2
......
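The memset change simply orders the length check ahead of the write-prefetch, so a len == 0 call no longer dirties a cache line it will never write. A C analogue of the reordered entry path:

	void *memset_sketch(void *s, int c, size_t n)
	{
		if (n == 0)			/* bail before touching *s */
			return s;
		__builtin_prefetch(s, 1);	/* 1 == prefetch for write */
		/* ... bulk stores as before ... */
		return s;
	}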
......@@ -28,6 +28,10 @@ int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
static struct cpuinfo_arc_cache {
unsigned int sz_k, line_len, colors;
} ic_info, dc_info, slc_info;
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page);
......@@ -35,78 +39,24 @@ void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc_cache *p;
#define PR_CACHE(p, cfg, str) \
if (!(p)->line_len) \
n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
else \
n += scnprintf(buf + n, len - n, \
str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
(p)->sz_k, (p)->assoc, (p)->line_len, \
(p)->vipt ? "VIPT" : "PIPT", \
(p)->alias ? " aliasing" : "", \
IS_USED_CFG(cfg));
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
p = &cpuinfo_arc700[c].slc;
if (p->line_len)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
return buf;
}
/*
* Read the Cache Build Configuration Registers, Decode them and save into
* the cpuinfo structure for later use.
* No Validation done here, simply read/convert the BCRs
*/
static void read_decode_cache_bcr_arcv2(int cpu)
static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
struct cpuinfo_arc_cache *p_slc = &slc_info;
struct bcr_identity ident;
struct bcr_generic sbcr;
struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:24, way:2, lsz:2, sz:4;
#else
unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
} slc_cfg;
struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
} cbcr;
struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
} vol;
struct bcr_clust_cfg cbcr;
struct bcr_volatile vol;
int n = 0;
READ_BCR(ARC_REG_SLC_BCR, sbcr);
if (sbcr.ver) {
struct bcr_slc_cfg slc_cfg;
READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
p_slc->sz_k = 128 << slc_cfg.sz;
l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
......@@ -129,70 +79,83 @@ static void read_decode_cache_bcr_arcv2(int cpu)
ioc_enable = 0;
}
READ_BCR(AUX_IDENTITY, ident);
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
if (ident.family > 0x51) {
READ_BCR(AUX_VOL, vol);
perip_base = vol.start << 28;
/* HS 3.0 has limit and strict-ordering fields */
if (cpuinfo_arc700[cpu].core.family > 0x52)
if (ident.family > 0x52)
perip_end = (vol.limit << 28) - 1;
}
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
return n;
}
void read_decode_cache_bcr(void)
int arc_cache_mumbojumbo(int c, char *buf, int len)
{
struct cpuinfo_arc_cache *p_ic, *p_dc;
unsigned int cpu = smp_processor_id();
struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
} ibcr, dbcr;
struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
struct bcr_cache ibcr, dbcr;
int vipt, assoc;
int n = 0;
p_ic = &cpuinfo_arc700[cpu].icache;
READ_BCR(ARC_REG_IC_BCR, ibcr);
if (!ibcr.ver)
goto dc_chk;
if (ibcr.ver <= 3) {
if (is_isa_arcompact() && (ibcr.ver <= 3)) {
BUG_ON(ibcr.config != 3);
p_ic->assoc = 2; /* Fixed to 2w set assoc */
} else if (ibcr.ver >= 4) {
p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
assoc = 2; /* Fixed to 2w set assoc */
} else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
assoc = 1 << ibcr.config; /* 1,2,4,8 */
}
p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz_k = 1 << (ibcr.sz - 1);
p_ic->vipt = 1;
p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
n += scnprintf(buf + n, len - n,
"I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
p_ic->sz_k, assoc, p_ic->line_len,
p_ic->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
dc_chk:
p_dc = &cpuinfo_arc700[cpu].dcache;
READ_BCR(ARC_REG_DC_BCR, dbcr);
if (!dbcr.ver)
goto slc_chk;
if (dbcr.ver <= 3) {
if (is_isa_arcompact() && (dbcr.ver <= 3)) {
BUG_ON(dbcr.config != 2);
p_dc->assoc = 4; /* Fixed to 4w set assoc */
p_dc->vipt = 1;
p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
} else if (dbcr.ver >= 4) {
p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
p_dc->vipt = 0;
p_dc->alias = 0; /* PIPT so can't VIPT alias */
vipt = 1;
assoc = 4; /* Fixed to 4w set assoc */
p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
} else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
vipt = 0;
assoc = 1 << dbcr.config; /* 1,2,4,8 */
p_dc->colors = 1; /* PIPT so can't VIPT alias */
}
p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz_k = 1 << (dbcr.sz - 1);
n += scnprintf(buf + n, len - n,
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
slc_chk:
if (is_isa_arcv2())
read_decode_cache_bcr_arcv2(cpu);
n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
return n;
}
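Worked example of the colors arithmetic above: a 32K, 2-way VIPT I-cache with 8K pages gives

	colors = sz_k / assoc / TO_KB(PAGE_SIZE);	/* 32 / 2 / 8 == 2 */

i.e. two page-sized aliases per way, so the " aliasing" tag is printed; a 16K 2-way cache with the same page size yields 16 / 2 / 8 == 1 color and no aliasing.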
/*
......@@ -581,7 +544,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
#endif /* CONFIG_ARC_HAS_ICACHE */
noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
......@@ -644,7 +607,7 @@ noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
......@@ -1082,7 +1045,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
* 3. All Caches need to be disabled when setting up IOC to elide any in-flight
* Coherency transactions
*/
noinline void __init arc_ioc_setup(void)
static noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
......@@ -1144,12 +1107,10 @@ noinline void __init arc_ioc_setup(void)
* one core suffices for all
* - IOC setup / dma callbacks only need to be done once
*/
void __init arc_cache_init_master(void)
static noinline void __init arc_cache_init_master(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
struct cpuinfo_arc_cache *ic = &ic_info;
if (!ic->line_len)
panic("cache support enabled but non-existent cache\n");
......@@ -1162,14 +1123,14 @@ void __init arc_cache_init_master(void)
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)
if (is_isa_arcv2() && ic->colors > 1)
_cache_line_loop_ic_fn = __cache_line_loop_v3;
else
_cache_line_loop_ic_fn = __cache_line_loop;
}
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
struct cpuinfo_arc_cache *dc = &dc_info;
if (!dc->line_len)
panic("cache support enabled but non-existent cache\n");
......@@ -1181,14 +1142,13 @@ void __init arc_cache_init_master(void)
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
if (dc->alias) {
if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != num_colors)
if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (!dc->alias && handled) {
} else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
}
......@@ -1231,9 +1191,6 @@ void __init arc_cache_init_master(void)
void __ref arc_cache_init(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
char str[256];
pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
if (!cpu)
arc_cache_init_master();
......
......@@ -22,14 +22,3 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n)
{
return __arc_clear_user(to, n);
}
EXPORT_SYMBOL(arc_clear_user_noinline);
#endif
......@@ -13,6 +13,7 @@
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/entry.h>
#include <asm/mmu.h>
/*
......@@ -99,10 +100,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
if (faulthandler_disabled() || !mm)
goto no_context;
if (regs->ecr_cause & ECR_C_PROTV_STORE) /* ST/EX */
if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
write = 1;
else if ((regs->ecr_vec == ECR_V_PROTV) &&
(regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
else if ((regs->ecr.vec == ECR_V_PROTV) &&
(regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
flags = FAULT_FLAG_DEFAULT;
......
......@@ -15,6 +15,7 @@
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/arcregs.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
......
......@@ -18,7 +18,9 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
static int __read_mostly pae_exists;
static struct cpuinfo_arc_mmu {
unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;
/*
* Utility Routine to erase a J-TLB entry
......@@ -131,7 +133,7 @@ static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
noinline void local_flush_tlb_all(void)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
unsigned int entry;
int num_tlb = mmu->sets * mmu->ways;
......@@ -389,7 +391,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/*
* Routine to create a TLB entry
*/
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
......@@ -564,89 +566,64 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
void read_decode_mmu_bcr(void)
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
unsigned int tmp;
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
ways:4, ver:8;
#endif
} *mmu3;
struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
/* DTLB ITLB JES JE JA */
unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
} *mmu4;
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned int bcr, u_dtlb, u_itlb, sasid;
struct bcr_mmu_3 *mmu3;
struct bcr_mmu_4 *mmu4;
char super_pg[64] = "";
int n = 0;
tmp = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (tmp >> 24);
bcr = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (bcr >> 24);
if (is_isa_arcompact() && mmu->ver == 3) {
mmu3 = (struct bcr_mmu_3 *)&tmp;
mmu3 = (struct bcr_mmu_3 *)&bcr;
mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
mmu->sets = 1 << mmu3->sets;
mmu->ways = 1 << mmu3->ways;
mmu->u_dtlb = mmu3->u_dtlb;
mmu->u_itlb = mmu3->u_itlb;
mmu->sasid = mmu3->sasid;
u_dtlb = mmu3->u_dtlb;
u_itlb = mmu3->u_itlb;
sasid = mmu3->sasid;
} else {
mmu4 = (struct bcr_mmu_4 *)&tmp;
mmu4 = (struct bcr_mmu_4 *)&bcr;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
mmu->sets = 64 << mmu4->n_entry;
mmu->ways = mmu4->n_ways * 2;
mmu->u_dtlb = mmu4->u_dtlb * 4;
mmu->u_itlb = mmu4->u_itlb * 4;
mmu->sasid = mmu4->sasid;
pae_exists = mmu->pae = mmu4->pae;
u_dtlb = mmu4->u_dtlb * 4;
u_itlb = mmu4->u_itlb * 4;
sasid = mmu4->sasid;
mmu->pae = mmu4->pae;
}
}
char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
char super_pg[64] = "";
if (p_mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
if (mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "/%dM%s",
mmu->s_pg_sz_m,
IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
n += scnprintf(buf + n, len - n,
"MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
"MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
mmu->sets, mmu->ways,
u_dtlb, u_itlb,
IS_AVAIL1(sasid, ", SASID"),
IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return n;
}
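A worked decode of the MMUv4 path above, with illustrative field values: sz0 = 4 gives pg_sz_k = 1 << (4 - 1) = 8 (8K pages); n_entry = 2 gives sets = 64 << 2 = 256; n_ways = 2 gives ways = 2 * 2 = 4; and u_dtlb = 2 reports 2 * 4 = 8 micro-DTLB entries.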
int pae40_exist_but_not_enab(void)
{
return pae_exists && !is_pae40_enabled();
return mmuinfo.pae && !is_pae40_enabled();
}
void arc_mmu_init(void)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
char str[256];
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
int compat = 0;
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
/*
* Can't be done in processor.h due to header include dependencies
*/
......@@ -723,7 +700,7 @@ volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
int set, n_ways = mmu->ways;
......
......@@ -6,7 +6,6 @@
*/
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/libfdt.h>
#include <asm/asm-offsets.h>
......