Commit e4f06bee authored by Andrew Morton, committed by Linus Torvalds

[PATCH] s390: inline assembly constraints.

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

s390 inline assembly bug-fixes:

- Add memory barriers to spinlocks, atomic variable functions and to
  test_and_{set,clear,change}_bit.

- Add "=m" and "m" contraints to tell gcc that the content of a variable
  is in fact used.

- Replace "+m" constraints by "=m" on the output and "m" on the input list.

- Use a C implementation for ffz and __ffs.

- Use the generic C implementations for ffs and fls.
parent 7e928143
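For illustration, here is a minimal sketch of the constraint pattern the
patch moves to (editorial example, not code from the patch itself;
atomic_inc_sketch is a made-up name). Instead of a single "+m" operand,
the memory location appears twice, once as an "=m" output and once as a
matching "m" input, and "memory" is added to the clobber list so the asm
acts as a compiler barrier:

static inline void atomic_inc_sketch(atomic_t *v)
{
        int old, new;
        __asm__ __volatile__(
                "   l    %0,0(%3)\n"    /* load current counter value */
                "0: lr   %1,%0\n"       /* copy it */
                "   ahi  %1,1\n"        /* add one */
                "   cs   %0,%1,0(%3)\n" /* compare and swap */
                "   jl   0b"            /* retry if the counter changed */
                : "=&d" (old), "=&d" (new),
                  "=m" (v->counter)     /* output: counter is written... */
                : "a" (v), "m" (v->counter) /* ...and also read */
                : "cc", "memory" );     /* compiler barrier */
}

Without the "m" input (or the "memory" clobber) gcc is free to assume the
counter's memory is unused by the asm and may reorder or drop accesses
around it.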
 #ifndef __ARCH_S390_ATOMIC__
 #define __ARCH_S390_ATOMIC__
-#ifdef __KERNEL__
 /*
  * include/asm-s390/atomic.h
  *
@@ -27,6 +26,8 @@ typedef struct {
 } __attribute__ ((aligned (4))) atomic_t;
 #define ATOMIC_INIT(i) { (i) }
+#ifdef __KERNEL__
 #define __CS_LOOP(ptr, op_val, op_string) ({ \
         typeof(ptr->counter) old_val, new_val; \
         __asm__ __volatile__("   l     %0,0(%3)\n" \
@@ -35,8 +36,10 @@ typedef struct {
                              "   cs    %0,%1,0(%3)\n" \
                              "   jl    0b" \
                              : "=&d" (old_val), "=&d" (new_val), \
-                               "+m" (((atomic_t *)(ptr))->counter) \
-                             : "a" (ptr), "d" (op_val) : "cc" ); \
+                               "=m" (((atomic_t *)(ptr))->counter) \
+                             : "a" (ptr), "d" (op_val), \
+                               "m" (((atomic_t *)(ptr))->counter) \
+                             : "cc", "memory" ); \
         new_val; \
 })
 #define atomic_read(v) ((v)->counter)
@@ -106,8 +109,10 @@ typedef struct {
                              "   csg   %0,%1,0(%3)\n" \
                              "   jl    0b" \
                              : "=&d" (old_val), "=&d" (new_val), \
-                               "+m" (((atomic_t *)(ptr))->counter) \
-                             : "a" (ptr), "d" (op_val) : "cc" ); \
+                               "=m" (((atomic_t *)(ptr))->counter) \
+                             : "a" (ptr), "d" (op_val), \
+                               "m" (((atomic_t *)(ptr))->counter) \
+                             : "cc", "memory" ); \
         new_val; \
 })
 #define atomic64_read(v) ((v)->counter)
@@ -182,9 +187,9 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
         "  ipm  %0\n"
         "  srl  %0,28\n"
         "0:"
-        : "=&d" (retval), "+m" (v->counter)
-        : "a" (v), "d" (expected_oldval) , "d" (new_val)
-        : "cc" );
+        : "=&d" (retval), "=m" (v->counter)
+        : "a" (v), "d" (expected_oldval) , "d" (new_val),
+          "m" (v->counter) : "cc", "memory" );
         return retval;
 }
...
@@ -13,6 +13,7 @@
  *
  */
 #include <linux/config.h>
+#include <linux/compiler.h>
 /*
  * 32 bit bitops format:
@@ -109,6 +110,8 @@ extern const char _sb_findmap[];
 #endif /* __s390x__ */
+#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )
 #ifdef CONFIG_SMP
 /*
  * SMP safe set_bit routine based on compare and swap (CS)
@@ -189,6 +192,7 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
         /* Do the atomic update. */
         __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+        __BITOPS_BARRIER();
         return (old & mask) != 0;
 }
@@ -211,6 +215,7 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
         /* Do the atomic update. */
         __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+        __BITOPS_BARRIER();
         return (old ^ new) != 0;
 }
@@ -233,6 +238,7 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
         mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
         /* Do the atomic update. */
         __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+        __BITOPS_BARRIER();
         return (old & mask) != 0;
 }
 #endif /* CONFIG_SMP */
@@ -435,7 +441,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
         asm volatile("oc 0(1,%1),0(%2)"
                      : "=m" (*(char *) addr)
                      : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
-                       "m" (*(char *) addr) : "cc" );
+                       "m" (*(char *) addr) : "cc", "memory" );
         return (ch >> (nr & 7)) & 1;
 }
 #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
@@ -454,7 +460,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
         asm volatile("nc 0(1,%1),0(%2)"
                      : "=m" (*(char *) addr)
                      : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
-                       "m" (*(char *) addr) : "cc" );
+                       "m" (*(char *) addr) : "cc", "memory" );
         return (ch >> (nr & 7)) & 1;
 }
 #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
@@ -473,7 +479,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
         asm volatile("xc 0(1,%1),0(%2)"
                      : "=m" (*(char *) addr)
                      : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
-                       "m" (*(char *) addr) : "cc" );
+                       "m" (*(char *) addr) : "cc", "memory" );
         return (ch >> (nr & 7)) & 1;
 }
 #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
@@ -681,59 +687,6 @@ find_next_bit (unsigned long * addr, int size, int offset)
         return (offset + res);
 }
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
-        unsigned long reg;
-        int result;
-        __asm__("   slr  %0,%0\n"
-                "   lhi  %2,0xff\n"
-                "   tml  %1,0xffff\n"
-                "   jno  0f\n"
-                "   ahi  %0,16\n"
-                "   srl  %1,16\n"
-                "0: tml  %1,0x00ff\n"
-                "   jno  1f\n"
-                "   ahi  %0,8\n"
-                "   srl  %1,8\n"
-                "1: nr   %1,%2\n"
-                "   ic   %1,0(%1,%3)\n"
-                "   alr  %0,%1"
-                : "=&d" (result), "+a" (word), "=&d" (reg)
-                : "a" (&_zb_findmap) : "cc" );
-        return result;
-}
-/*
- * __ffs = find first bit in word. Undefined if no bit exists,
- * so code should check against 0UL first..
- */
-static inline unsigned long __ffs (unsigned long word)
-{
-        unsigned long reg, result;
-        __asm__("   slr  %0,%0\n"
-                "   lhi  %2,0xff\n"
-                "   tml  %1,0xffff\n"
-                "   jnz  0f\n"
-                "   ahi  %0,16\n"
-                "   srl  %1,16\n"
-                "0: tml  %1,0x00ff\n"
-                "   jnz  1f\n"
-                "   ahi  %0,8\n"
-                "   srl  %1,8\n"
-                "1: nr   %1,%2\n"
-                "   ic   %1,0(%1,%3)\n"
-                "   alr  %0,%1"
-                : "=&d" (result), "+a" (word), "=&d" (reg)
-                : "a" (&_sb_findmap) : "cc" );
-        return result;
-}
 #else /* __s390x__ */
 /*
@@ -910,35 +863,31 @@ find_next_bit (unsigned long * addr, unsigned long size, unsigned long offset)
         return (offset + res);
 }
+#endif /* __s390x__ */
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
  */
 static inline unsigned long ffz(unsigned long word)
 {
-        unsigned long reg, result;
-        __asm__("   lhi  %2,-1\n"
-                "   slgr %0,%0\n"
-                "   clr  %1,%2\n"
-                "   jne  0f\n"
-                "   aghi %0,32\n"
-                "   srlg %1,%1,32\n"
-                "0: lghi %2,0xff\n"
-                "   tmll %1,0xffff\n"
-                "   jno  1f\n"
-                "   aghi %0,16\n"
-                "   srlg %1,%1,16\n"
-                "1: tmll %1,0x00ff\n"
-                "   jno  2f\n"
-                "   aghi %0,8\n"
-                "   srlg %1,%1,8\n"
-                "2: ngr  %1,%2\n"
-                "   ic   %1,0(%1,%3)\n"
-                "   algr %0,%1"
-                : "=&d" (result), "+a" (word), "=&d" (reg)
-                : "a" (&_zb_findmap) : "cc" );
-        return result;
+        unsigned long bit = 0;
+#ifdef __s390x__
+        if (likely((word & 0xffffffff) == 0xffffffff)) {
+                word >>= 32;
+                bit += 32;
+        }
+#endif
+        if (likely((word & 0xffff) == 0xffff)) {
+                word >>= 16;
+                bit += 16;
+        }
+        if (likely((word & 0xff) == 0xff)) {
+                word >>= 8;
+                bit += 8;
+        }
+        return bit + _zb_findmap[word & 0xff];
 }
 /*
@@ -947,31 +896,24 @@ static inline unsigned long ffz(unsigned long word)
  */
 static inline unsigned long __ffs (unsigned long word)
 {
-        unsigned long reg, result;
-        __asm__("   slgr %0,%0\n"
-                "   ltr  %1,%1\n"
-                "   jnz  0f\n"
-                "   aghi %0,32\n"
-                "   srlg %1,%1,32\n"
-                "0: lghi %2,0xff\n"
-                "   tmll %1,0xffff\n"
-                "   jnz  1f\n"
-                "   aghi %0,16\n"
-                "   srlg %1,%1,16\n"
-                "1: tmll %1,0x00ff\n"
-                "   jnz  2f\n"
-                "   aghi %0,8\n"
-                "   srlg %1,%1,8\n"
-                "2: ngr  %1,%2\n"
-                "   ic   %1,0(%1,%3)\n"
-                "   algr %0,%1"
-                : "=&d" (result), "+a" (word), "=&d" (reg)
-                : "a" (&_sb_findmap) : "cc" );
-        return result;
-}
-#endif /* __s390x__ */
+        unsigned long bit = 0;
+#ifdef __s390x__
+        if (likely((word & 0xffffffff) == 0)) {
+                word >>= 32;
+                bit += 32;
+        }
+#endif
+        if (likely((word & 0xffff) == 0)) {
+                word >>= 16;
+                bit += 16;
+        }
+        if (likely((word & 0xff) == 0)) {
+                word >>= 8;
+                bit += 8;
+        }
+        return bit + _sb_findmap[word & 0xff];
+}
 /*
  * Every architecture must define this function. It's the fastest
@@ -989,68 +931,12 @@ static inline int sched_find_first_bit(unsigned long *b)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-extern inline int ffs (int x)
-{
-        int r = 1;
-        if (x == 0)
-                return 0;
-        __asm__("   tml  %1,0xffff\n"
-                "   jnz  0f\n"
-                "   srl  %1,16\n"
-                "   ahi  %0,16\n"
-                "0: tml  %1,0x00ff\n"
-                "   jnz  1f\n"
-                "   srl  %1,8\n"
-                "   ahi  %0,8\n"
-                "1: tml  %1,0x000f\n"
-                "   jnz  2f\n"
-                "   srl  %1,4\n"
-                "   ahi  %0,4\n"
-                "2: tml  %1,0x0003\n"
-                "   jnz  3f\n"
-                "   srl  %1,2\n"
-                "   ahi  %0,2\n"
-                "3: tml  %1,0x0001\n"
-                "   jnz  4f\n"
-                "   ahi  %0,1\n"
-                "4:"
-                : "=&d" (r), "+d" (x) : : "cc" );
-        return r;
-}
+#define ffs(x) generic_ffs(x)
 /*
  * fls: find last bit set.
  */
-static __inline__ int fls(int x)
-{
-        int r = 32;
-        if (x == 0)
-                return 0;
-        __asm__("   tmh  %1,0xffff\n"
-                "   jz   0f\n"
-                "   sll  %1,16\n"
-                "   ahi  %0,-16\n"
-                "0: tmh  %1,0xff00\n"
-                "   jz   1f\n"
-                "   sll  %1,8\n"
-                "   ahi  %0,-8\n"
-                "1: tmh  %1,0xf000\n"
-                "   jz   2f\n"
-                "   sll  %1,4\n"
-                "   ahi  %0,-4\n"
-                "2: tmh  %1,0xc000\n"
-                "   jz   3f\n"
-                "   sll  %1,2\n"
-                "   ahi  %0,-2\n"
-                "3: tmh  %1,0x8000\n"
-                "   jz   4f\n"
-                "   ahi  %0,-1\n"
-                "4:"
-                : "+d" (r), "+d" (x) : : "cc" );
-        return r;
-}
+#define fls(x) generic_fls(x)
 /*
  * hweightN: returns the hamming weight (i.e. the number
@@ -1273,11 +1159,16 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
 /* Bitmap functions for the minix filesystem. */
 /* FIXME !!! */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#define minix_test_and_set_bit(nr,addr) \
+        test_and_set_bit(nr,(unsigned long *)addr)
+#define minix_set_bit(nr,addr) \
+        set_bit(nr,(unsigned long *)addr)
+#define minix_test_and_clear_bit(nr,addr) \
+        test_and_clear_bit(nr,(unsigned long *)addr)
+#define minix_test_bit(nr,addr) \
+        test_bit(nr,(unsigned long *)addr)
+#define minix_find_first_zero_bit(addr,size) \
+        find_first_zero_bit(addr,size)
 #endif /* __KERNEL__ */
...
@@ -50,7 +50,7 @@ static __inline__ __u32 ___arch__swab32p(__u32 *x)
         "   icm   %0,4,2(%1)\n"
         "   icm   %0,2,1(%1)\n"
         "   ic    %0,0(%1)"
-        : "=&d" (result) : "a" (x) : "cc" );
+        : "=&d" (result) : "a" (x), "m" (*x) : "cc" );
 #else /* __s390x__ */
         "   lrv   %0,%1"
         : "=d" (result) : "m" (*x) );
@@ -67,7 +67,7 @@ static __inline__ __u32 ___arch__swab32(__u32 x)
         __asm__ __volatile__ (
         "   lrvr  %0,%1"
-        : "=d" (result) : "d" (x) );
+        : "=d" (result) : "d" (x), "m" (x) );
         return result;
 #endif /* __s390x__ */
 }
@@ -85,7 +85,7 @@ static __inline__ __u16 ___arch__swab16p(__u16 *x)
 #ifndef __s390x__
         "   icm   %0,2,1(%1)\n"
         "   ic    %0,0(%1)\n"
-        : "=&d" (result) : "a" (x) : "cc" );
+        : "=&d" (result) : "a" (x), "m" (*x) : "cc" );
 #else /* __s390x__ */
         "   lrvh  %0,%1"
         : "=d" (result) : "m" (*x) );
...
@@ -42,7 +42,7 @@ csum_partial(const unsigned char * buff, int len, unsigned int sum)
         __asm__ __volatile__ (
         "0:  cksm %0,%1\n"    /* do checksum on longs */
         "    jo   0b\n"
-        : "+&d" (sum), "+&a" (rp) : : "cc" );
+        : "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
 #else /* __s390x__ */
         __asm__ __volatile__ (
         "    lgr  2,%1\n"     /* address in gpr 2 */
@@ -51,7 +51,7 @@ csum_partial(const unsigned char * buff, int len, unsigned int sum)
         "    jo   0b\n"
         : "+&d" (sum)
         : "d" (buff), "d" (len)
-        : "cc", "2", "3" );
+        : "cc", "memory", "2", "3" );
 #endif /* __s390x__ */
         return sum;
 }
...
@@ -36,8 +36,8 @@
         "   ahi   1,1\n" \
         "1: st    1,4+%1\n" \
         "   lr    %0,0" \
-        : "=d" (__r), "+m" (__n) \
-        : "d" (base) : "0", "1", "2", "cc" ); \
+        : "=d" (__r), "=m" (__n) \
+        : "d" (base), "m" (__n) : "0", "1", "2", "cc" ); \
         (n) = (__n); \
         __r; \
 })
...
@@ -553,11 +553,15 @@ ptep_clear_flush(struct vm_area_struct *vma,
         if (!(pte_val(pte) & _PAGE_INVALID)) {
                 /* S390 has 1mb segments, we are emulating 4MB segments */
                 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-                __asm__ __volatile__ ("ipte %0,%1" : : "a" (pto), "a" (address));
+                __asm__ __volatile__ ("ipte %2,%3"
+                                      : "=m" (*ptep) : "m" (*ptep),
+                                        "a" (pto), "a" (address) );
         }
 #else /* __s390x__ */
         if (!(pte_val(pte) & _PAGE_INVALID))
-                __asm__ __volatile__ ("ipte %0,%1" : : "a" (ptep), "a" (address));
+                __asm__ __volatile__ ("ipte %2,%3"
+                                      : "=m" (*ptep) : "m" (*ptep),
+                                        "a" (ptep), "a" (address) );
 #endif /* __s390x__ */
         pte_clear(ptep);
         return pte;
...
@@ -66,7 +66,7 @@ extern struct task_struct *last_task_used_math;
 #else /* __s390x__ */
-# define TASK_SIZE (0x20000000000UL)
+# define TASK_SIZE (0x40000000000UL)
 # define TASK31_SIZE (0x80000000UL)
 # define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
                 (TASK31_SIZE / 2) : (TASK_SIZE / 2))
@@ -200,14 +200,14 @@ static inline void __load_psw_mask (unsigned long mask)
         "    st    %0,4(%1)\n"
         "    lpsw  0(%1)\n"
         "1:"
-        : "=&d" (addr) : "a" (&psw) : "memory", "cc" );
+        : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" );
 #else /* __s390x__ */
         asm volatile (
         "    larl  %0,1f\n"
         "    stg   %0,8(%1)\n"
         "    lpswe 0(%1)\n"
         "1:"
-        : "=&d" (addr) : "a" (&psw) : "memory", "cc" );
+        : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" );
 #endif /* __s390x__ */
 }
@@ -229,14 +229,16 @@ static inline void enabled_wait(void)
         "    oi    4(%1),0x80\n"
         "    lpsw  0(%1)\n"
         "1:"
-        : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
+        : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
+        : "memory", "cc" );
 #else /* __s390x__ */
         asm volatile (
         "    larl  %0,0f\n"
         "    stg   %0,8(%1)\n"
         "    lpswe 0(%1)\n"
         "0:"
-        : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
+        : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
+        : "memory", "cc" );
 #endif /* __s390x__ */
 }
@@ -247,7 +249,7 @@ static inline void enabled_wait(void)
 static inline void disabled_wait(unsigned long code)
 {
         char psw_buffer[2*sizeof(psw_t)];
-        char ctl_buf[4];
+        unsigned long ctl_buf;
         psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1)
                                   & -sizeof(psw_t));
@@ -258,9 +260,9 @@ static inline void disabled_wait(unsigned long code)
          * the processor is dead afterwards
          */
 #ifndef __s390x__
-        asm volatile ("    stctl 0,0,0(%1)\n"
-                      "    ni    0(%1),0xef\n" /* switch off protection */
-                      "    lctl  0,0,0(%1)\n"
+        asm volatile ("    stctl 0,0,0(%2)\n"
+                      "    ni    0(%2),0xef\n" /* switch off protection */
+                      "    lctl  0,0,0(%2)\n"
                       "    stpt  0xd8\n"       /* store timer */
                       "    stckc 0xe0\n"       /* store clock comparator */
                       "    stpx  0x108\n"      /* store prefix register */
@@ -271,13 +273,14 @@ static inline void disabled_wait(unsigned long code)
                       "    std   6,0x178\n"    /* store f6 */
                       "    stm   0,15,0x180\n" /* store general registers */
                       "    stctl 0,15,0x1c0\n" /* store control registers */
-                      "    oi    0(%1),0x10\n" /* fake protection bit */
-                      "    lpsw 0(%0)"
-                      : : "a" (dw_psw), "a" (&ctl_buf) : "cc" );
+                      "    oi    0x1c0,0x10\n" /* fake protection bit */
+                      "    lpsw 0(%1)"
+                      : "=m" (ctl_buf)
+                      : "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" );
 #else /* __s390x__ */
-        asm volatile ("    stctg 0,0,0(%1)\n"
-                      "    ni    4(%1),0xef\n" /* switch off protection */
-                      "    lctlg 0,0,0(%1)\n"
+        asm volatile ("    stctg 0,0,0(%2)\n"
+                      "    ni    4(%2),0xef\n" /* switch off protection */
+                      "    lctlg 0,0,0(%2)\n"
                       "    lghi  1,0x1000\n"
                       "    stpt  0x328(1)\n"   /* store timer */
                       "    stckc 0x330(1)\n"   /* store clock comparator */
@@ -303,8 +306,10 @@ static inline void disabled_wait(unsigned long code)
                       "    stmg  0,15,0x280(1)\n" /* store general registers */
                       "    stctg 0,15,0x380(1)\n" /* store control registers */
                       "    oi    0x384(1),0x10\n" /* fake protection bit */
-                      "    lpswe 0(%0)"
-                      : : "a" (dw_psw), "a" (&ctl_buf) : "cc", "0", "1");
+                      "    lpswe 0(%1)"
+                      : "=m" (ctl_buf)
+                      : "a" (dw_psw), "a" (&ctl_buf),
+                        "m" (dw_psw) : "cc", "0", "1");
 #endif /* __s390x__ */
 }
...
@@ -102,21 +102,21 @@ static inline void __down_read(struct rw_semaphore *sem)
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: lr   %1,%0\n"
-        "   ahi  %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   ahi  %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
         "0: lgr  %1,%0\n"
-        "   aghi %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   aghi %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
-        : "cc", "memory" );
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count),
+          "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
         if (old < 0)
                 rwsem_down_read_failed(sem);
 }
@@ -130,25 +130,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: ltr  %1,%0\n"
         "   jm   1f\n"
-        "   ahi  %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   ahi  %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b\n"
         "1:"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
         "0: ltgr %1,%0\n"
         "   jm   1f\n"
-        "   aghi %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   aghi %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b\n"
         "1:"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
-        : "cc", "memory" );
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count),
+          "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
         return old >= 0 ? 1 : 0;
 }
@@ -162,20 +162,20 @@ static inline void __down_write(struct rw_semaphore *sem)
         tmp = RWSEM_ACTIVE_WRITE_BIAS;
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: lr   %1,%0\n"
-        "   a    %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   a    %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
         "0: lgr  %1,%0\n"
-        "   ag   %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   ag   %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "m" (tmp)
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count), "m" (tmp)
         : "cc", "memory" );
         if (old != 0)
                 rwsem_down_write_failed(sem);
@@ -190,22 +190,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%1)\n"
+        "   l    %0,0(%2)\n"
         "0: ltr  %0,%0\n"
         "   jnz  1f\n"
-        "   cs   %0,%2,0(%1)\n"
+        "   cs   %0,%4,0(%2)\n"
         "   jl   0b\n"
 #else /* __s390x__ */
-        "   lg   %0,0(%1)\n"
+        "   lg   %0,0(%2)\n"
         "0: ltgr %0,%0\n"
         "   jnz  1f\n"
-        "   csg  %0,%2,0(%1)\n"
+        "   csg  %0,%4,0(%2)\n"
         "   jl   0b\n"
 #endif /* __s390x__ */
         "1:"
-        : "=&d" (old)
-        : "a" (&sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
-        : "cc", "memory" );
+        : "=&d" (old), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count),
+          "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" );
         return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
 }
@@ -218,20 +218,21 @@ static inline void __up_read(struct rw_semaphore *sem)
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: lr   %1,%0\n"
-        "   ahi  %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   ahi  %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
         "0: lgr  %1,%0\n"
-        "   aghi %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   aghi %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count),
+          "i" (-RWSEM_ACTIVE_READ_BIAS)
         : "cc", "memory" );
         if (new < 0)
                 if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -248,20 +249,20 @@ static inline void __up_write(struct rw_semaphore *sem)
         tmp = -RWSEM_ACTIVE_WRITE_BIAS;
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: lr   %1,%0\n"
-        "   a    %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   a    %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
         "0: lgr  %1,%0\n"
-        "   ag   %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   ag   %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "m" (tmp)
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count), "m" (tmp)
         : "cc", "memory" );
         if (new < 0)
                 if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -278,20 +279,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
         tmp = -RWSEM_WAITING_BIAS;
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: lr   %1,%0\n"
-        "   a    %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   a    %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
         "0: lgr  %1,%0\n"
-        "   ag   %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   ag   %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "m" (tmp)
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count), "m" (tmp)
         : "cc", "memory" );
         if (new > 1)
                 rwsem_downgrade_wake(sem);
@@ -306,20 +307,20 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: lr   %1,%0\n"
-        "   ar   %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   ar   %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
         "0: lgr  %1,%0\n"
-        "   agr  %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   agr  %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "d" (delta)
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count), "d" (delta)
         : "cc", "memory" );
 }
@@ -332,20 +333,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
         __asm__ __volatile__(
 #ifndef __s390x__
-        "   l    %0,0(%2)\n"
+        "   l    %0,0(%3)\n"
         "0: lr   %1,%0\n"
-        "   ar   %1,%3\n"
-        "   cs   %0,%1,0(%2)\n"
+        "   ar   %1,%5\n"
+        "   cs   %0,%1,0(%3)\n"
         "   jl   0b"
 #else /* __s390x__ */
-        "   lg   %0,0(%2)\n"
+        "   lg   %0,0(%3)\n"
        "0: lgr  %1,%0\n"
-        "   agr  %1,%3\n"
-        "   csg  %0,%1,0(%2)\n"
+        "   agr  %1,%5\n"
+        "   csg  %0,%1,0(%3)\n"
         "   jl   0b"
 #endif /* __s390x__ */
-        : "=&d" (old), "=&d" (new)
-        : "a" (&sem->count), "d" (delta)
+        : "=&d" (old), "=&d" (new), "=m" (sem->count)
+        : "a" (&sem->count), "m" (sem->count), "d" (delta)
         : "cc", "memory" );
         return new;
 }
...
@@ -95,9 +95,9 @@ static inline int down_trylock(struct semaphore * sem)
         "   cs   %0,%1,0(%3)\n"
         "   jl   0b\n"
         "1:"
-        : "=&d" (old_val), "=&d" (new_val),
-          "+m" (sem->count.counter)
-        : "a" (&sem->count.counter) : "cc" );
+        : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
+        : "a" (&sem->count.counter), "m" (sem->count.counter)
+        : "cc", "memory" );
         return old_val <= 0;
 }
...
...@@ -57,8 +57,9 @@ extern inline void _raw_spin_lock(spinlock_t *lp) ...@@ -57,8 +57,9 @@ extern inline void _raw_spin_lock(spinlock_t *lp)
"1: slr %1,%1\n" "1: slr %1,%1\n"
" cs %1,%0,0(%3)\n" " cs %1,%0,0(%3)\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (reg1), "=&d" (reg2), "+m" (lp->lock) : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock)
: "a" (&lp->lock) : "cc" ); : "a" (&lp->lock), "m" (lp->lock)
: "cc", "memory" );
#else /* __s390x__ */ #else /* __s390x__ */
unsigned long reg1, reg2; unsigned long reg1, reg2;
__asm__ __volatile(" bras %1,1f\n" __asm__ __volatile(" bras %1,1f\n"
...@@ -66,9 +67,9 @@ extern inline void _raw_spin_lock(spinlock_t *lp) ...@@ -66,9 +67,9 @@ extern inline void _raw_spin_lock(spinlock_t *lp)
"1: slr %0,%0\n" "1: slr %0,%0\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,0(%3)\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (reg1), "=&d" (reg2), "+m" (lp->lock) : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock)
: "a" (&lp->lock), "i" (__DIAG44_OPERAND) : "a" (&lp->lock), "i" (__DIAG44_OPERAND),
: "cc" ); "m" (lp->lock) : "cc", "memory" );
#endif /* __s390x__ */ #endif /* __s390x__ */
} }
...@@ -82,8 +83,9 @@ extern inline int _raw_spin_trylock(spinlock_t *lp) ...@@ -82,8 +83,9 @@ extern inline int _raw_spin_trylock(spinlock_t *lp)
__asm__ __volatile(" slr %0,%0\n" __asm__ __volatile(" slr %0,%0\n"
" basr %1,0\n" " basr %1,0\n"
"0: cs %0,%1,0(%3)" "0: cs %0,%1,0(%3)"
: "=&d" (result), "=&d" (reg), "+m" (lp->lock) : "=&d" (result), "=&d" (reg), "=m" (lp->lock)
: "a" (&lp->lock) : "cc" ); : "a" (&lp->lock), "m" (lp->lock)
: "cc", "memory" );
return !result; return !result;
} }
...@@ -93,7 +95,8 @@ extern inline void _raw_spin_unlock(spinlock_t *lp) ...@@ -93,7 +95,8 @@ extern inline void _raw_spin_unlock(spinlock_t *lp)
__asm__ __volatile("cs %0,%3,0(%4)" __asm__ __volatile("cs %0,%3,0(%4)"
: "=d" (old), "=m" (lp->lock) : "=d" (old), "=m" (lp->lock)
: "0" (lp->lock), "d" (0), "a" (lp) : "cc" ); : "0" (lp->lock), "d" (0), "a" (lp)
: "cc", "memory" );
} }
/* /*
...@@ -126,8 +129,8 @@ typedef struct { ...@@ -126,8 +129,8 @@ typedef struct {
" la 3,1(2)\n" /* one more reader */ \ " la 3,1(2)\n" /* one more reader */ \
" cs 2,3,0(%1)\n" /* try to write new value */ \ " cs 2,3,0(%1)\n" /* try to write new value */ \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \ : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */ #else /* __s390x__ */
#define _raw_read_lock(rw) \ #define _raw_read_lock(rw) \
asm volatile(" lg 2,0(%1)\n" \ asm volatile(" lg 2,0(%1)\n" \
...@@ -137,9 +140,9 @@ typedef struct { ...@@ -137,9 +140,9 @@ typedef struct {
" la 3,1(2)\n" /* one more reader */ \ " la 3,1(2)\n" /* one more reader */ \
" csg 2,3,0(%1)\n" /* try to write new value */ \ " csg 2,3,0(%1)\n" /* try to write new value */ \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) \ : "=m" ((rw)->lock) \
: "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */ #endif /* __s390x__ */
#ifndef __s390x__ #ifndef __s390x__
...@@ -151,8 +154,8 @@ typedef struct { ...@@ -151,8 +154,8 @@ typedef struct {
" ahi 3,-1\n" /* one less reader */ \ " ahi 3,-1\n" /* one less reader */ \
" cs 2,3,0(%1)\n" \ " cs 2,3,0(%1)\n" \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \ : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */ #else /* __s390x__ */
#define _raw_read_unlock(rw) \ #define _raw_read_unlock(rw) \
asm volatile(" lg 2,0(%1)\n" \ asm volatile(" lg 2,0(%1)\n" \
...@@ -162,9 +165,9 @@ typedef struct { ...@@ -162,9 +165,9 @@ typedef struct {
" bctgr 3,0\n" /* one less reader */ \ " bctgr 3,0\n" /* one less reader */ \
" csg 2,3,0(%1)\n" \ " csg 2,3,0(%1)\n" \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) \ : "=m" ((rw)->lock) \
: "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */ #endif /* __s390x__ */
#ifndef __s390x__ #ifndef __s390x__
...@@ -176,8 +179,8 @@ typedef struct { ...@@ -176,8 +179,8 @@ typedef struct {
"1: slr 2,2\n" /* old lock value must be 0 */ \ "1: slr 2,2\n" /* old lock value must be 0 */ \
" cs 2,3,0(%1)\n" \ " cs 2,3,0(%1)\n" \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \ : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */ #else /* __s390x__ */
#define _raw_write_lock(rw) \ #define _raw_write_lock(rw) \
asm volatile(" llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \ asm volatile(" llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \
...@@ -186,9 +189,9 @@ typedef struct { ...@@ -186,9 +189,9 @@ typedef struct {
"1: slgr 2,2\n" /* old lock value must be 0 */ \ "1: slgr 2,2\n" /* old lock value must be 0 */ \
" csg 2,3,0(%1)\n" \ " csg 2,3,0(%1)\n" \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) \ : "=m" ((rw)->lock) \
: "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */ #endif /* __s390x__ */
#ifndef __s390x__ #ifndef __s390x__
...@@ -200,8 +203,8 @@ typedef struct { ...@@ -200,8 +203,8 @@ typedef struct {
" sll 2,31\n" /* old lock value must be 0x80000000 */ \ " sll 2,31\n" /* old lock value must be 0x80000000 */ \
" cs 2,3,0(%1)\n" \ " cs 2,3,0(%1)\n" \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \ : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */ #else /* __s390x__ */
#define _raw_write_unlock(rw) \ #define _raw_write_unlock(rw) \
asm volatile(" slgr 3,3\n" /* new lock value = 0 */ \ asm volatile(" slgr 3,3\n" /* new lock value = 0 */ \
...@@ -210,9 +213,9 @@ typedef struct { ...@@ -210,9 +213,9 @@ typedef struct {
"1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\ "1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\
" csg 2,3,0(%1)\n" \ " csg 2,3,0(%1)\n" \
" jl 0b" \ " jl 0b" \
: "+m" ((rw)->lock) \ : "=m" ((rw)->lock) \
: "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
: "2", "3", "cc" ) "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */ #endif /* __s390x__ */
extern inline int _raw_write_trylock(rwlock_t *rw) extern inline int _raw_write_trylock(rwlock_t *rw)
...@@ -230,8 +233,9 @@ extern inline int _raw_write_trylock(rwlock_t *rw) ...@@ -230,8 +233,9 @@ extern inline int _raw_write_trylock(rwlock_t *rw)
" llihh %1,0x8000\n" " llihh %1,0x8000\n"
"0: csg %0,%1,0(%3)\n" "0: csg %0,%1,0(%3)\n"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (result), "=&d" (reg), "+m" (rw->lock) : "=&d" (result), "=&d" (reg), "=m" (rw->lock)
: "a" (&rw->lock) : "cc" ); : "a" (&rw->lock), "m" (rw->lock)
: "cc", "memory" );
return result == 0; return result == 0;
} }
......
@@ -32,28 +32,28 @@ extern struct task_struct *__switch_to(void *, void *);
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
         asm volatile (
-        "   std   0,8(%0)\n"
-        "   std   2,24(%0)\n"
-        "   std   4,40(%0)\n"
-        "   std   6,56(%0)"
-        : : "a" (fpregs) : "memory" );
+        "   std   0,8(%1)\n"
+        "   std   2,24(%1)\n"
+        "   std   4,40(%1)\n"
+        "   std   6,56(%1)"
+        : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
         if (!MACHINE_HAS_IEEE)
                 return;
         asm volatile(
-        "   stfpc 0(%0)\n"
-        "   std   1,16(%0)\n"
-        "   std   3,32(%0)\n"
-        "   std   5,48(%0)\n"
-        "   std   7,64(%0)\n"
-        "   std   8,72(%0)\n"
-        "   std   9,80(%0)\n"
-        "   std   10,88(%0)\n"
-        "   std   11,96(%0)\n"
-        "   std   12,104(%0)\n"
-        "   std   13,112(%0)\n"
-        "   std   14,120(%0)\n"
-        "   std   15,128(%0)\n"
-        : : "a" (fpregs) : "memory" );
+        "   stfpc 0(%1)\n"
+        "   std   1,16(%1)\n"
+        "   std   3,32(%1)\n"
+        "   std   5,48(%1)\n"
+        "   std   7,64(%1)\n"
+        "   std   8,72(%1)\n"
+        "   std   9,80(%1)\n"
+        "   std   10,88(%1)\n"
+        "   std   11,96(%1)\n"
+        "   std   12,104(%1)\n"
+        "   std   13,112(%1)\n"
+        "   std   14,120(%1)\n"
+        "   std   15,128(%1)\n"
+        : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
 }
 static inline void restore_fp_regs(s390_fp_regs *fpregs)
@@ -63,7 +63,7 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
         "   ld    2,24(%0)\n"
         "   ld    4,40(%0)\n"
         "   ld    6,56(%0)"
-        : : "a" (fpregs));
+        : : "a" (fpregs), "m" (*fpregs) );
         if (!MACHINE_HAS_IEEE)
                 return;
         asm volatile(
@@ -80,7 +80,7 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
         "   ld    13,112(%0)\n"
         "   ld    14,120(%0)\n"
         "   ld    15,128(%0)\n"
-        : : "a" (fpregs));
+        : : "a" (fpregs), "m" (*fpregs) );
 }
 #define switch_to(prev,next,last) do { \
@@ -107,15 +107,15 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
                 shift = (3 ^ (addr & 3)) << 3;
                 addr ^= addr & 3;
                 asm volatile(
-                "    l     %0,0(%3)\n"
+                "    l     %0,0(%4)\n"
                 "0:  lr    0,%0\n"
-                "    nr    0,%2\n"
-                "    or    0,%1\n"
-                "    cs    %0,0,0(%3)\n"
+                "    nr    0,%3\n"
+                "    or    0,%2\n"
+                "    cs    %0,0,0(%4)\n"
                 "    jl    0b\n"
-                : "=&d" (old)
-                : "d" (x << shift), "d" (~(255 << shift)), "a" (addr)
-                : "memory", "cc", "0" );
+                : "=&d" (old), "=m" (*(int *) addr)
+                : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
+                  "m" (*(int *) addr) : "memory", "cc", "0" );
                 x = old >> shift;
                 break;
         case 2:
@@ -123,34 +123,36 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
                 shift = (2 ^ (addr & 2)) << 3;
                 addr ^= addr & 2;
                 asm volatile(
-                "    l     %0,0(%3)\n"
+                "    l     %0,0(%4)\n"
                 "0:  lr    0,%0\n"
-                "    nr    0,%2\n"
-                "    or    0,%1\n"
-                "    cs    %0,0,0(%3)\n"
+                "    nr    0,%3\n"
+                "    or    0,%2\n"
+                "    cs    %0,0,0(%4)\n"
                 "    jl    0b\n"
-                : "=&d" (old)
-                : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr)
-                : "memory", "cc", "0" );
+                : "=&d" (old), "=m" (*(int *) addr)
+                : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
+                  "m" (*(int *) addr) : "memory", "cc", "0" );
                 x = old >> shift;
                 break;
         case 4:
                 asm volatile (
-                "    l     %0,0(%2)\n"
-                "0:  cs    %0,%1,0(%2)\n"
+                "    l     %0,0(%3)\n"
+                "0:  cs    %0,%2,0(%3)\n"
                 "    jl    0b\n"
-                : "=&d" (old) : "d" (x), "a" (ptr)
-                : "memory", "cc", "0" );
+                : "=&d" (old), "=m" (*(int *) ptr)
+                : "d" (x), "a" (ptr), "m" (*(int *) ptr)
+                : "memory", "cc" );
                 x = old;
                 break;
 #ifdef __s390x__
         case 8:
                 asm volatile (
-                "    lg    %0,0(%2)\n"
-                "0:  csg   %0,%1,0(%2)\n"
+                "    lg    %0,0(%3)\n"
+                "0:  csg   %0,%2,0(%3)\n"
                 "    jl    0b\n"
-                : "=&d" (old) : "d" (x), "a" (ptr)
-                : "memory", "cc", "0" );
+                : "=&d" (old), "=m" (*(long *) ptr)
+                : "d" (x), "a" (ptr), "m" (*(long *) ptr)
+                : "memory", "cc" );
                 x = old;
                 break;
 #endif /* __s390x__ */
@@ -268,7 +270,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #define local_irq_enable() ({ \
         unsigned long __dummy; \
         __asm__ __volatile__ ( \
-                "stosm 0(%1),0x03" : "=m" (__dummy) : "a" (&__dummy) ); \
+                "stosm 0(%1),0x03" \
+                : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
         })
 #define local_irq_disable() ({ \
@@ -279,10 +282,10 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
         })
 #define local_save_flags(x) \
-        __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x) )
+        __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )
 #define local_irq_restore(x) \
-        __asm__ __volatile__("ssm 0(%0)" : : "a" (&x) : "memory")
+        __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory")
 #define irqs_disabled() \
 ({ \
@@ -294,7 +297,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #ifdef __s390x__
 #define __load_psw(psw) \
-        __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw) : "cc" );
+        __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
 #define __ctl_load(array, low, high) ({ \
         __asm__ __volatile__ ( \
...
@@ -25,7 +25,7 @@ static inline cycles_t get_cycles(void)
 {
         cycles_t cycles;
-        __asm__("stck 0(%0)" : : "a" (&(cycles)) : "memory", "cc");
+        __asm__("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");
         return cycles >> 2;
 }
@@ -33,7 +33,7 @@ static inline unsigned long long get_clock (void)
 {
         unsigned long long clk;
-        __asm__("stck 0(%0)" : : "a" (&(clk)) : "memory", "cc");
+        __asm__("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
         return clk;
 }
...
@@ -85,7 +85,7 @@ static inline void global_flush_tlb(void)
         "    slr  2,2\n"
         "    slr  3,3\n"
         "    csp  2,%0"
-        : : "a" (addr) : "cc", "2", "3" );
+        : : "a" (addr), "m" (dummy) : "cc", "2", "3" );
         }
 }
...
@@ -124,8 +124,8 @@ struct exception_table_entry
         "1:\n" \
         __uaccess_fixup \
         : "=&d" (err) \
-        : "a" (__to),"a" (__from),"K" (-EFAULT),"0" (0) \
-        : "cc" ); \
+        : "a" (__to),"a" (__from),"K" (-EFAULT),"0" (0),\
+          "m" (x) : "cc" ); \
 })
 #else /* __s390x__ */
...