Commit 13f62e84 authored by Heiko Carstens

s390/cmpxchg: use symbolic names for inline assembly operands

Make cmpxchg() inline assemblies more readable by using symbolic names
for operands.

Link: https://lore.kernel.org/r/Y2J7yzQYt/bjLQXY@osiris
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 247f34f7
...@@ -96,56 +96,64 @@ static __always_inline unsigned long __cmpxchg(unsigned long address, ...@@ -96,56 +96,64 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
shift = (3 ^ (address & 3)) << 3; shift = (3 ^ (address & 3)) << 3;
address ^= address & 3; address ^= address & 3;
asm volatile( asm volatile(
" l %0,%2\n" " l %[prev],%[address]\n"
"0: nr %0,%5\n" "0: nr %[prev],%[mask]\n"
" lr %1,%0\n" " lr %[tmp],%[prev]\n"
" or %0,%3\n" " or %[prev],%[old]\n"
" or %1,%4\n" " or %[tmp],%[new]\n"
" cs %0,%1,%2\n" " cs %[prev],%[tmp],%[address]\n"
" jnl 1f\n" " jnl 1f\n"
" xr %1,%0\n" " xr %[tmp],%[prev]\n"
" nr %1,%5\n" " nr %[tmp],%[mask]\n"
" jnz 0b\n" " jnz 0b\n"
"1:" "1:"
: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address) : [prev] "=&d" (prev),
: "d" ((old & 0xff) << shift), [tmp] "=&d" (tmp),
"d" ((new & 0xff) << shift), [address] "+Q" (*(int *)address)
"d" (~(0xff << shift)) : [old] "d" ((old & 0xff) << shift),
[new] "d" ((new & 0xff) << shift),
[mask] "d" (~(0xff << shift))
: "memory", "cc"); : "memory", "cc");
return prev >> shift; return prev >> shift;
case 2: case 2:
shift = (2 ^ (address & 2)) << 3; shift = (2 ^ (address & 2)) << 3;
address ^= address & 2; address ^= address & 2;
asm volatile( asm volatile(
" l %0,%2\n" " l %[prev],%[address]\n"
"0: nr %0,%5\n" "0: nr %[prev],%[mask]\n"
" lr %1,%0\n" " lr %[tmp],%[prev]\n"
" or %0,%3\n" " or %[prev],%[old]\n"
" or %1,%4\n" " or %[tmp],%[new]\n"
" cs %0,%1,%2\n" " cs %[prev],%[tmp],%[address]\n"
" jnl 1f\n" " jnl 1f\n"
" xr %1,%0\n" " xr %[tmp],%[prev]\n"
" nr %1,%5\n" " nr %[tmp],%[mask]\n"
" jnz 0b\n" " jnz 0b\n"
"1:" "1:"
: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address) : [prev] "=&d" (prev),
: "d" ((old & 0xffff) << shift), [tmp] "=&d" (tmp),
"d" ((new & 0xffff) << shift), [address] "+Q" (*(int *)address)
"d" (~(0xffff << shift)) : [old] "d" ((old & 0xffff) << shift),
[new] "d" ((new & 0xffff) << shift),
[mask] "d" (~(0xffff << shift))
: "memory", "cc"); : "memory", "cc");
return prev >> shift; return prev >> shift;
case 4: case 4:
asm volatile( asm volatile(
" cs %0,%3,%1\n" " cs %[prev],%[new],%[address]\n"
: "=&d" (prev), "+Q" (*(int *) address) : [prev] "=&d" (prev),
: "0" (old), "d" (new) [address] "+Q" (*(int *)address)
: "0" (old),
[new] "d" (new)
: "memory", "cc"); : "memory", "cc");
return prev; return prev;
case 8: case 8:
asm volatile( asm volatile(
" csg %0,%3,%1\n" " csg %[prev],%[new],%[address]\n"
: "=&d" (prev), "+QS" (*(long *) address) : [prev] "=&d" (prev),
: "0" (old), "d" (new) [address] "+QS" (*(long *)address)
: "0" (old),
[new] "d" (new)
: "memory", "cc"); : "memory", "cc");
return prev; return prev;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment