Commit 45e15c1a authored by Guo Ren

csky: Add qspinlock support

Enable qspinlock, following the requirements spelled out in commit
a8ad07e5 ("asm-generic: qspinlock: Indicate the use of mixed-size
atomics").

C-SKY only has "ldex/stex" for building atomic operations, and it gives
a strong forward-progress guarantee for "ldex/stex": once ldex has
pulled the cache line into L1, other cores are blocked from snooping
that address for several cycles. Consequently, atomic_fetch_add and
xchg16 have the same level of forward-progress guarantee on C-SKY.
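
For illustration, the halfword exchange can be modeled in plain C. This
is a minimal sketch only: a GCC atomic builtin stands in for the
ldex/stex retry loop in the patch below, little-endian halfword
placement is assumed, and all names are hypothetical:

	#include <stdint.h>

	/* Sketch: a 16-bit xchg built on a 32-bit LL/SC-style primitive. */
	static inline uint16_t xchg16_sketch(volatile uint16_t *p, uint16_t newv)
	{
		uintptr_t addr  = (uintptr_t)p;
		uint32_t  shift = (addr & 2) ? 16 : 0;	/* which half of the word */
		uint32_t  mask  = 0xffffu << shift;
		/* Halfword pointers are 2-byte aligned, so clearing bit 1
		 * word-aligns the address, as the patch's "& ~2" does. */
		volatile uint32_t *wp = (volatile uint32_t *)(addr & ~(uintptr_t)2);
		uint32_t old = *wp, tmp;

		do {	/* mirrors the ldex/and/or/stex/bez retry loop */
			tmp = (old & ~mask) | ((uint32_t)newv << shift);
		} while (!__atomic_compare_exchange_n(wp, &old, tmp, 0,
						      __ATOMIC_RELAXED,
						      __ATOMIC_RELAXED));

		return (uint16_t)((old & mask) >> shift);
	}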

Qspinlock has better code size and performance on the fast path.
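
For context, the generic fast path referred to here
(include/asm-generic/qspinlock.h) is roughly a single 32-bit cmpxchg of
the whole lock word, falling back to the slowpath on contention; quoted
from memory, so treat it as a sketch:

	static __always_inline void queued_spin_lock(struct qspinlock *lock)
	{
		int val = 0;

		/* Uncontended case: one atomic op takes the lock. */
		if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val,
						      _Q_LOCKED_VAL)))
			return;

		queued_spin_lock_slowpath(lock, val);
	}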
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
parent 4e8bb4ba
arch/csky/Kconfig
@@ -8,6 +8,7 @@ config CSKY
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select COMMON_CLK
arch/csky/include/asm/Kbuild
@@ -3,10 +3,10 @@ generic-y += asm-offsets.h
 generic-y += extable.h
 generic-y += gpio.h
 generic-y += kvm_para.h
-generic-y += spinlock.h
-generic-y += spinlock_types.h
+generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
+generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
arch/csky/include/asm/cmpxchg.h
@@ -15,6 +15,26 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;			\
 	unsigned long tmp;				\
 	switch (size) {					\
+	case 2: {					\
+		u32 ret;				\
+		u32 shif = ((ulong)__ptr & 2) ? 16 : 0;	\
+		u32 mask = 0xffff << shif;		\
+		__ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \
+		__asm__ __volatile__ (			\
+		"1:	ldex.w	%0, (%4)\n"		\
+		"	and	%1, %0, %2\n"		\
+		"	or	%1, %1, %3\n"		\
+		"	stex.w	%1, (%4)\n"		\
+		"	bez	%1, 1b\n"		\
+			: "=&r" (ret), "=&r" (tmp)	\
+			: "r" (~mask),			\
+			  "r" ((u32)__new << shif),	\
+			  "r" (__ptr)			\
+			: "memory");			\
+		__ret = (__typeof__(*(ptr)))		\
+			((ret & mask) >> shif);		\
+		break;					\
+	}						\
 	case 4:						\
 		asm volatile (				\
 		"1:	ldex.w		%0, (%3) \n"	\
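
The new case 2 above is what lets the generic qspinlock slowpath
exchange the 16-bit tail field of the lock word, the mixed-size atomic
that commit a8ad07e5 asks architectures to provide forward progress
for. Roughly, from kernel/locking/qspinlock.c when _Q_PENDING_BITS == 8
(quoted from memory as a sketch):

	static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
	{
		/* A relaxed 16-bit exchange of the tail halfword. */
		return (u32)xchg_relaxed(&lock->tail,
					 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
	}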
arch/csky/include/asm/spinlock.h (new file)
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()	smp_mb()
+
+#endif /* __ASM_CSKY_SPINLOCK_H */
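
A hedged usage sketch for smp_mb__after_spinlock(): spin_lock() on its
own is only an acquire, and callers such as the scheduler pair it with
this macro when a full barrier is required. All names below are
illustrative:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static int demo_flag;

	static void demo_publish(void)
	{
		spin_lock(&demo_lock);
		/* Upgrade the lock's acquire ordering to a full barrier. */
		smp_mb__after_spinlock();
		WRITE_ONCE(demo_flag, 1);
		spin_unlock(&demo_lock);
	}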
arch/csky/include/asm/spinlock_types.h (new file)
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */