Commit 41d65207 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/atomics: Use immediate operand when possible

Today we get the following code generation for atomic operations:

	c001bb2c:	39 20 00 01 	li      r9,1
	c001bb30:	7d 40 18 28 	lwarx   r10,0,r3
	c001bb34:	7d 09 50 50 	subf    r8,r9,r10
	c001bb38:	7d 00 19 2d 	stwcx.  r8,0,r3

	c001c7a8:	39 40 00 01 	li      r10,1
	c001c7ac:	7d 00 18 28 	lwarx   r8,0,r3
	c001c7b0:	7c ea 42 14 	add     r7,r10,r8
	c001c7b4:	7c e0 19 2d 	stwcx.  r7,0,r3

By allowing GCC to choose between an immediate and a register operand,
we get:

	c001bb2c:	7d 20 18 28 	lwarx   r9,0,r3
	c001bb30:	39 49 ff ff 	addi    r10,r9,-1
	c001bb34:	7d 40 19 2d 	stwcx.  r10,0,r3
	--
	c001c7a4:	7d 40 18 28 	lwarx   r10,0,r3
	c001c7a8:	39 0a 00 01 	addi    r8,r10,1
	c001c7ac:	7d 00 19 2d 	stwcx.  r8,0,r3

For "and", the dot form has to be used because "andi" doesn't exist.

For logical operations we use an unsigned 16-bit immediate (the "K" constraint).
For arithmetic operations we use a signed 16-bit immediate (the "I" constraint).
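
To see the mechanism outside the kernel, here is a minimal standalone sketch (the names are illustrative, and it assumes a powerpc GCC, e.g. powerpc-linux-gnu-gcc -O2 -S): the "rI" constraint lets GCC place the operand either in a register or in a signed 16-bit immediate, and the %I output modifier prints an "i" when the immediate form was chosen, so a single template covers both "add" and "addi".

	static inline int add_demo(int x, int y)
	{
		int r;

		/*
		 * "b" keeps x out of r0: "addi" reads rA == r0 as the
		 * constant 0 rather than the register's contents.
		 */
		__asm__("add%I2 %0,%1,%2" : "=r" (r) : "b" (x), "rI" (y));
		return r;
	}

	int add_const(int x)      { return add_demo(x, 5); }	/* assembles to addi */
	int add_var(int x, int y) { return add_demo(x, y); }	/* assembles to add  */

The patch itself sidesteps the r0 quirk in the add/sub loops with the "c" suffix instead: the carrying forms addic/addc have no r0 special case, but they write the carry into XER, which is why "xer" appears in their clobber lists below.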

On pmac32_defconfig, this reduces the kernel text size by approximately another 8 kbytes.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Segher Boessenkool <segher@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/2ec558d44db8045752fe9dbd29c9ba84bab6030b.1632236981.git.christophe.leroy@csgroup.eu
parent fb350784
@@ -37,62 +37,62 @@ static __inline__ void arch_atomic_set(atomic_t *v, int i)
 	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
-#define ATOMIC_OP(op, asm_op)						\
+#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
 static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
 {									\
 	int t;								\
 									\
 	__asm__ __volatile__(						\
 "1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
-	#asm_op " %0,%2,%0\n"						\
+	#asm_op "%I2" suffix " %0,%0,%2\n"				\
 "	stwcx.	%0,0,%3 \n"						\
 "	bne-	1b\n"							\
 	: "=&r" (t), "+m" (v->counter)					\
-	: "r" (a), "r" (&v->counter)					\
-	: "cc");							\
+	: "r"#sign (a), "r" (&v->counter)				\
+	: "cc", ##__VA_ARGS__);						\
 }									\
 
-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
+#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)	\
 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v)\
 {									\
 	int t;								\
 									\
 	__asm__ __volatile__(						\
 "1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
-	#asm_op " %0,%2,%0\n"						\
+	#asm_op "%I2" suffix " %0,%0,%2\n"				\
 "	stwcx.	%0,0,%3\n"						\
 "	bne-	1b\n"							\
 	: "=&r" (t), "+m" (v->counter)					\
-	: "r" (a), "r" (&v->counter)					\
-	: "cc");							\
+	: "r"#sign (a), "r" (&v->counter)				\
+	: "cc", ##__VA_ARGS__);						\
 									\
 	return t;							\
 }
 
-#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
 {									\
 	int res, t;							\
 									\
 	__asm__ __volatile__(						\
 "1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
-	#asm_op " %1,%3,%0\n"						\
+	#asm_op "%I3" suffix " %1,%0,%3\n"				\
 "	stwcx.	%1,0,%4\n"						\
 "	bne-	1b\n"							\
 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
-	: "r" (a), "r" (&v->counter)					\
-	: "cc");							\
+	: "r"#sign (a), "r" (&v->counter)				\
+	: "cc", ##__VA_ARGS__);						\
 									\
 	return res;							\
 }
 
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
-	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
+	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
+	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
+	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, subf)
+ATOMIC_OPS(add, add, "c", I, "xer")
+ATOMIC_OPS(sub, sub, "c", I, "xer")
 
 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
@@ -101,13 +101,13 @@ ATOMIC_OPS(sub, subf)
 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
 
 #undef ATOMIC_OPS
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
+	ATOMIC_OP(op, asm_op, suffix, sign)				\
+	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
 
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(or, or)
-ATOMIC_OPS(xor, xor)
+ATOMIC_OPS(and, and, ".", K)
+ATOMIC_OPS(or, or, "", K)
+ATOMIC_OPS(xor, xor, "", K)
 
 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
@@ -241,15 +241,15 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 "1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq	2f \n\
-	add	%0,%2,%0 \n"
+	add%I2c	%0,%0,%2 \n"
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b \n"
 	PPC_ATOMIC_EXIT_BARRIER
-"	subf	%0,%2,%0 \n\
+"	sub%I2c	%0,%0,%2 \n\
 2:"
 	: "=&r" (t)
-	: "r" (&v->counter), "r" (a), "r" (u)
-	: "cc", "memory");
+	: "r" (&v->counter), "rI" (a), "r" (u)
+	: "cc", "memory", "xer");
 
 	return t;
 }
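
For reference, here is a sketch (not verbatim preprocessor output; whitespace tidied) of what ATOMIC_OP now expands to for ATOMIC_OPS(add, add, "c", I, "xer"), after the string pieces are concatenated:

	static __inline__ void arch_atomic_add(int a, atomic_t *v)
	{
		int t;

		__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# atomic_add\n"
	"	add%I2c %0,%0,%2\n"		/* addic if %2 is an immediate, addc otherwise */
	"	stwcx.	%0,0,%3 \n"
	"	bne-	1b\n"
		: "=&r" (t), "+m" (v->counter)
		: "rI" (a), "r" (&v->counter)	/* "I" also accepts a signed 16-bit constant */
		: "cc", "xer");			/* addic/addc write the carry into XER */
	}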