Commit b4f2a17b authored by Joshua Kinard, committed by Ralf Baechle

Improve atomic.h robustness

I've maintained this patch, originally from Thiemo Seufer in 2004, for a
long time, and I think it's time for it to be considered for inclusion.
I have had no problems with it across various SGI systems over the
years.

To quote the post here:
http://www.linux-mips.org/archives/linux-mips/2004-12/msg00000.html

"the atomic functions use so far memory references for the inline
assembler to access the semaphore. This can lead to additional
instructions in the ll/sc loop, because newer compilers don't
expand the memory reference any more but leave it to the assembler.

The appended patch uses registers instead, and makes the ll/sc
arguments more explicit. In some cases it will lead also to better
register scheduling because the register isn't bound to an output
any more."
Signed-off-by: Joshua Kinard <kumba@gentoo.org>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/4029/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 12250d84
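For context before the diff: the change below replaces the paired "=m" output / "m" input constraints on v->counter with a single read-write "+m" operand. Here is a minimal sketch of the two styles on a simplified ll/sc add loop. This is an illustration only, assuming a MIPS target; my_atomic_t and my_atomic_add are hypothetical names, not the kernel's code:

typedef struct { volatile int counter; } my_atomic_t;

static inline void my_atomic_add(int i, my_atomic_t *v)
{
	int temp;	/* holds the ll result, then the sc success flag */

	__asm__ __volatile__(
	"	.set	mips3			\n"
	"1:	ll	%0, %1			\n"	/* load-linked from counter */
	"	addu	%0, %2			\n"	/* add the increment */
	"	sc	%0, %1			\n"	/* store-conditional back */
	"	beqz	%0, 1b			\n"	/* retry if sc failed */
	"	.set	mips0			\n"
	/* New style: one read-write memory operand. */
	: "=&r" (temp), "+m" (v->counter)
	: "Ir" (i));
	/*
	 * Old style, as removed by this patch:
	 *   : "=&r" (temp), "=m" (v->counter)
	 *   : "Ir" (i), "m" (v->counter));
	 */
}

The "=&r" early-clobber on temp is needed in both styles, since temp is written by ll before the inputs are last used; only the description of the memory operand changes. With "+m", the compiler knows the asm both reads and writes v->counter, so the duplicate "m" input becomes unnecessary.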
arch/mips/include/asm/atomic.h
@@ -59,8 +59,8 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		"	sc	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		int temp;
 
@@ -71,8 +71,8 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 			"	addu	%0, %2				\n"
 			"	sc	%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter));
+			: "=&r" (temp), "+m" (v->counter)
+			: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -102,8 +102,8 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		"	sc	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		int temp;
 
@@ -114,8 +114,8 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 			"	subu	%0, %2				\n"
 			"	sc	%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter));
+			: "=&r" (temp), "+m" (v->counter)
+			: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -146,9 +146,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		int temp;
 
@@ -159,9 +158,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 			"	addu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
 			"	.set	mips0				\n"
-			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter)
-			: "memory");
+			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+			: "Ir" (i));
 		} while (unlikely(!result));
 
 		result = temp + i;
@@ -212,9 +210,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 			"	subu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
 			"	.set	mips0				\n"
-			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter)
-			: "memory");
+			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+			: "Ir" (i));
 		} while (unlikely(!result));
 
 		result = temp - i;
@@ -262,7 +259,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	.set	reorder					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -280,9 +277,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	.set	reorder					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else {
 		unsigned long flags;
 
@@ -430,8 +426,8 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		"	scd	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		long temp;
 
@@ -442,8 +438,8 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 			"	daddu	%0, %2				\n"
 			"	scd	%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter));
+			: "=&r" (temp), "+m" (v->counter)
+			: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -473,8 +469,8 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		"	scd	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		long temp;
 
@@ -485,8 +481,8 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 			"	dsubu	%0, %2				\n"
 			"	scd	%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter));
+			: "=&r" (temp), "+m" (v->counter)
+			: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -517,9 +513,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	daddu	%0, %1, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		long temp;
 
@@ -649,9 +644,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	.set	reorder					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else {
 		unsigned long flags;