Commit 52e7d46c authored by Linus Torvalds

Merge tag 'arc-4.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:

 - Revert of ll-sc backoff retry workaround in atomics/spinlocks as
   hardware is now proven to work just fine

 - Typo fixes (Thanks Andrea Gelmini)

 - Removal of obsolete DT property (Alexey)

 - Other minor fixes

* tag 'arc-4.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  Revert "ARCv2: spinlock/rwlock/atomics: Delayed retry of failed SCOND with exponential backoff"
  Revert "ARCv2: spinlock/rwlock: Reset retry delay when starting a new spin-wait cycle"
  Revert "ARCv2: spinlock/rwlock/atomics: reduce 1 instruction in exponential backoff"
  ARC: don't enable DISCONTIGMEM unconditionally
  ARC: [intc-compact] simplify code for 2 priority levels
  arc: Get rid of root core-frequency property
  Fix typos
parents c8ae067f ed6aefed
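
Background for the three reverts below: the workaround being removed (Kconfig symbol ARC_STAR_9000923308) broke a suspected LLOCK/SCOND livelock by delaying each failed store-conditional retry and doubling the delay on every failure. The C fragment below is a behavioral sketch only, using C11 atomics in place of ARC's LLOCK/SCOND instructions; the function name is made up for illustration and nothing here is kernel code.

#include <stdatomic.h>

/* Sketch of the reverted exponential-backoff retry, with a C11
 * compare-exchange standing in for the LLOCK/SCOND pair. */
static void atomic_add_with_backoff(int i, _Atomic int *ctr)
{
	unsigned int delay = 1;
	int old;

	for (;;) {
		old = atomic_load_explicit(ctr, memory_order_relaxed);	/* llock */
		if (atomic_compare_exchange_weak(ctr, &old, old + i))	/* scond */
			break;						/* "bz 4f": success */

		/* scond failed: stall for 'delay' iterations, then double it,
		 * so contending cores progressively back off */
		for (volatile unsigned int t = delay; t != 0; t--)
			;
		delay *= 2;	/* the asm used rol (rotate-left) for this */
	}
}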
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-	def_bool y
+	def_bool n
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
@@ -186,9 +186,6 @@ if SMP
 config ARC_HAS_COH_CACHES
 	def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-	def_bool n
-
 config ARC_MCIP
 	bool "ARConnect Multicore IP (MCIP) Support "
 	depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-	bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+	bool "Setup Timer IRQ as high Priority"
 	default n
-	# Timer HAS to be high priority, for any other high priority config
-	select ARC_IRQ3_LV2
 	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-	depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-	bool
-
-config ARC_IRQ5_LV2
-	bool
-
-config ARC_IRQ6_LV2
-	bool
-
-endif	#ARC_COMPACT_IRQ_LEVELS
+	depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
 	bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-	bool "Workaround for llock/scond livelock"
-	default n
-	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
 config HIGHMEM
 	bool "High Memory Support"
-	select DISCONTIGMEM
+	select ARCH_DISCONTIGMEM_ENABLE
 	help
 	  With ARC 2G:2G address split, only upper 2G is directly addressable by
 	  kernel. Enable this to potentially allow access to rest of 2G and PAE
......
@@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
 boot		:= arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE	:= bootpImage
 
 all: $(KBUILD_IMAGE)
......
@@ -23,8 +23,6 @@
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
......
@@ -23,8 +23,6 @@
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
......
@@ -15,7 +15,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <750000000>;	/* 750 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
......
@@ -14,7 +14,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
......
@@ -14,7 +14,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
......
@@ -18,7 +18,6 @@
 / {
 	compatible = "ezchip,arc-nps";
-	clock-frequency = <83333333>;	/* 83.333333 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&intc>;
......
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsim";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
......
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsimosci";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
......
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
......
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <5000000>;	/* 5 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
......
@@ -13,7 +13,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
......
@@ -8,7 +8,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
......
@@ -8,7 +8,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
......
@@ -14,7 +14,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
......
@@ -15,7 +15,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
......
@@ -25,50 +25,17 @@
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay = 1, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bz	4f			\n"				\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"4: ; --- success ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "+&r" (delay),[tmp]	"=&r"	(tmp)				\
-
-#else	/* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bnz	1b			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
 	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	__asm__ __volatile__(						\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
 	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val)						\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter),					\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
......
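
A note on the ATOMIC_OP_RETURN hunk above: its surviving comment says an explicit full memory barrier is needed before and after, because value-returning atomics must act as full barriers, which the kernel provides via smp_mb() around the llock/scond loop. A rough C11 model of that contract follows; it is not the kernel code, and the fences merely stand in for smp_mb().

#include <stdatomic.h>

/* Rough model: full fences before and after the update loop, as the
 * diff's comment requires for value-returning atomics. */
static int atomic_add_return_model(int i, _Atomic int *ctr)
{
	int old;

	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	do {
		old = atomic_load_explicit(ctr, memory_order_relaxed);
	} while (!atomic_compare_exchange_weak_explicit(ctr, &old, old + i,
			memory_order_relaxed, memory_order_relaxed));
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */

	return old + i;
}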
@@ -76,8 +76,8 @@
  * We need to be a bit more cautious here. What if a kernel bug in
  * L1 ISR, caused SP to go whaco (some small value which looks like
  * USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
  * The only feasible way is to make sure this L2 happened in
  * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
  * L1 ISR before it switches stack
......
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 		local_flush_tlb_all();
 
 		/*
-		 * Above checke for rollover of 8 bit ASID in 32 bit container.
+		 * Above check for rollover of 8 bit ASID in 32 bit container.
 		 * If the container itself wrapped around, set it to a non zero
 		 * "generation" to distinguish from no context
 		 */
......
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  *      seperate PD0 and PD1, which combined forms a translation entry)
  *      while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
......
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)		(tsk->thread.ksp)
......
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *     support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *     gaurantted by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
......
@@ -20,11 +20,6 @@
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"					\n"				\
-	"4: ; --- done ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 *	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
......
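
The restored arch_spin_lock() in the hunk above reduces to: read the lock word, spin while it is held, attempt the exclusive store, and retry immediately ("bnz 1b", with no backoff) if the store-conditional fails. A C11 stand-in for that shape follows; the 0/1 lock values are an assumption for the sketch, as the real constants come from the arch's spinlock_types.h.

#include <stdatomic.h>

/* C11 model of the restored arch_spin_lock(). Assumed for the sketch:
 * 0 = unlocked, 1 = locked. */
static void spin_lock_model(_Atomic unsigned int *slock)
{
	unsigned int expected;

	do {
		expected = 0;	/* only acquirable when currently unlocked */
		/* a failed exchange simply loops again: the "bnz 1b" path */
	} while (!atomic_compare_exchange_weak_explicit(slock, &expected, 1,
			memory_order_acquire, memory_order_relaxed));
}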
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
 */
......
@@ -32,7 +32,7 @@
 #define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
  *	(start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
......
@@ -74,7 +74,7 @@
 	__tmp ^ __in;						\
 })
 
-#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
 
 #define __arch_swab32(x)	\
 ({	\
......
@@ -91,27 +91,13 @@ VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
 ; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 VECTOR   handle_interrupt_level2
 #else
 VECTOR   handle_interrupt_level1
 #endif
 
-.rept   25
+.rept   28
 VECTOR   handle_interrupt_level1 ; Other devices
 .endr
......
@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
 {
 	int level_mask = 0;
 
-	/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+	/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 
 	/*
 	 * Write to register, even if no LV2 IRQs configured to reset it
......
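
On the intc change just above: IS_ENABLED(CONFIG_X) evaluates to 1 when the option is enabled and 0 otherwise, and the old "<< 3" operand shows the ARCompact timer sits on IRQ 3, so the new single line builds the same mask the old ARC_IRQ3_LV2 line did. A standalone illustration follows; IS_ENABLED is reduced to a plain variable here since only its 0/1 result matters.

#include <stdio.h>

#define TIMER0_IRQ	3	/* per the old "<< 3" operand in the diff */

int main(void)
{
	int timer_is_high_prio = 1;	/* CONFIG_ARC_COMPACT_IRQ_LEVELS=y */
	int level_mask = 0;

	/* only the timer IRQ may be promoted to Level2 now */
	level_mask |= timer_is_high_prio << TIMER0_IRQ;

	printf("level_mask = 0x%x\n", level_mask);	/* prints 0x8 */
	return 0;
}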
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
 	int64_t delta = new_raw_count - prev_raw_count;
 
 	/*
-	 * We don't afaraid of hwc->prev_count changing beneath our feet
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
 	 * because there's no way for us to re-enter this function anytime.
 	 */
 	local64_set(&hwc->prev_count, new_raw_count);
......
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
 		/*
 		 * If we are here, it is established that @uboot_arg didn't
 		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * Appent to embedded DT cmdline.
+		 * append to embedded DT cmdline.
 		 * setup_machine_fdt() would have populated @boot_command_line
 		 */
 		if (uboot_tag == 1) {
......
@@ -34,7 +34,7 @@
  * -ViXS were still seeing crashes when using insmod to load drivers.
  *  It turned out that the code to change Execute permssions for TLB entries
  *  of user was not guarded for interrupts (mod_tlb_permission)
- *  This was cauing TLB entries to be overwritten on unrelated indexes
+ *  This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  *  -Exception happens in Delay slot of a JMP, and before user space resumes,
......
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,	/* file descriptor */
 			char __user *user_buf,	/* user buffer */
 			size_t len,		/* length of buffer */
......
@@ -215,7 +215,7 @@ void read_decode_cache_bcr(void)
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
  * Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 
 /*
  * This is technically for MMU v4, using the MMU v3 programming model
- * Special work for HS38 aliasing I-cache configuratino with PAE40
+ * Special work for HS38 aliasing I-cache configuration with PAE40
  *   - upper 8 bits of paddr need to be written into PTAG_HI
  *   - (and needs to be written before the lower 32 bits)
  * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
 			      ic->ver, CONFIG_ARC_MMU_VER);
 
 		/*
-		 * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
 		 */
 		if (is_isa_arcv2() && ic->alias)
......
@@ -10,7 +10,7 @@
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
......