Commit dc647a63 authored by David Mosberger

ia64: Drop ".bias" in spinlocks as it caused more harm than good.  Pointed
	out by Jesse Barnes.  Also, drop the old lock code.
parent 63aa95fe
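
For context: both lock paths here spin with test-and-test-and-set, i.e. they poll the lock word with plain reads and only attempt the atomic write once the lock looks free.  ld4.bias hints that the cache line should be acquired for exclusive ownership, which is counterproductive while spinning: a contended waiter is not going to write the word, so biasing just bounces the line between CPUs.  A minimal C sketch of the same idea, using C11 atomics instead of the kernel's ia64 asm (all toy_* names are illustrative, not from the patch):

	#include <stdatomic.h>

	/* Toy stand-in for the kernel's spinlock_t. */
	typedef struct { atomic_int lock; } toy_spinlock_t;

	static void toy_spin_lock (toy_spinlock_t *x)
	{
		for (;;) {
			int expected = 0;
			/* Test-and-set: one atomic attempt with acquire
			 * semantics (the analogue of cmpxchg4.acq). */
			if (atomic_compare_exchange_weak_explicit(&x->lock, &expected, 1,
								  memory_order_acquire,
								  memory_order_relaxed))
				return;
			/* Contended: spin with plain loads only (the "test"
			 * part), which keeps the cache line shared.  This is
			 * where the patch replaces ld4.bias with a plain ld4. */
			while (atomic_load_explicit(&x->lock, memory_order_relaxed) != 0)
				/* a pause/cpu_relax hint would go here */;
		}
	}
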
@@ -863,7 +863,7 @@ GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
 .wait:
 	// exponential backoff, kdb, lockmeter etc. go in here
 	hint @pause
-	ld4.bias r30=[r31]
+	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
 	nop 0
 	;;
 	cmp4.eq p14,p0=r30,r0
@@ -880,7 +880,7 @@ GLOBAL_ENTRY(ia64_spinlock_contention)
 .wait:
 	// exponential backoff, kdb, lockmeter etc. go in here
 	hint @pause
-	ld4.bias r30=[r31]
+	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
 	;;
 	cmp4.ne p14,p0=r30,r0
 	mov r30 = 1
@@ -22,9 +22,6 @@ typedef struct {
 #define SPIN_LOCK_UNLOCKED		(spinlock_t) { 0 }
 #define spin_lock_init(x)		((x)->lock = 0)
 
-#define NEW_LOCK
-#ifdef NEW_LOCK
-
 /*
  * Try to get the lock.  If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention().  We do not use a normal call because that would force all
@@ -87,31 +84,6 @@ _raw_spin_lock (spinlock_t *lock)
 #endif
 }
 
-#else /* !NEW_LOCK */
-
-/*
- * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
- * rather than a simple xchg to avoid writing the cache-line when
- * there is contention.
- */
-#define _raw_spin_lock(x) __asm__ __volatile__ (		\
-	"mov ar.ccv = r0\n"					\
-	"mov r29 = 1\n"						\
-	";;\n"							\
-	"1:\n"							\
-	"ld4.bias r2 = [%0]\n"					\
-	";;\n"							\
-	"cmp4.eq p0,p7 = r0,r2\n"				\
-	"(p7) br.cond.spnt.few 1b \n"				\
-	"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"			\
-	";;\n"							\
-	"cmp4.eq p0,p7 = r0, r2\n"				\
-	"(p7) br.cond.spnt.few 1b\n"				\
-	";;\n"							\
-	:: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
-
-#endif /* !NEW_LOCK */
-
 #define spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
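
The comment kept in the new code describes the structure that survives this cleanup: _raw_spin_lock inlines a single cmpxchg fast path and, on failure, makes a non-standard call to the out-of-line ia64_spinlock_contention() so that call sites don't have to treat the scratch registers as clobbered by an ordinary function call.  A rough sketch of that fast-path/slow-path split, continuing the C sketch above (toy_* names are hypothetical; the real slow path is the head.S assembly being patched):

	/* Out-of-line slow path: the spin loop lives in one place instead
	 * of being replicated at every call site. */
	static void toy_spinlock_contention (toy_spinlock_t *x)
	{
		int expected;
		do {
			/* Poll read-only until the lock looks free... */
			while (atomic_load_explicit(&x->lock, memory_order_relaxed) != 0)
				;
			expected = 0;
			/* ...then race for it with one acquire cmpxchg. */
		} while (!atomic_compare_exchange_weak_explicit(&x->lock, &expected, 1,
								memory_order_acquire,
								memory_order_relaxed));
	}

	/* Inline fast path: one cmpxchg; go out of line only on contention. */
	static inline void toy_raw_spin_lock (toy_spinlock_t *x)
	{
		int expected = 0;
		if (!atomic_compare_exchange_strong_explicit(&x->lock, &expected, 1,
							     memory_order_acquire,
							     memory_order_relaxed))
			toy_spinlock_contention(x);
	}

The non-standard calling convention itself has no C equivalent; in the real code the contention routine is reached by a branch that preserves registers a normal call would be allowed to clobber.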