Commit 2994488f authored by Jeremy Fitzhardinge, committed by H. Peter Anvin

x86, ticketlock: Convert __ticket_spin_lock to use xadd()

Convert the two variants of __ticket_spin_lock() to use xadd(), which
has the effect of making them identical, so remove the duplicate function.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent c576a3ea
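For context on the helper this patch leans on: xadd() wraps the x86 LOCK XADD instruction, which atomically adds its operand to a memory location and returns the value the location held beforehand, picking the operand width from the type it is handed. The sketch below is illustrative only (xadd_sketch is a made-up name; the kernel's real macro handles more widths and differs in detail), but it shows why a single C-level __ticket_spin_lock() can replace both the xaddw (NR_CPUS < 256) and xaddl (NR_CPUS >= 256) variants:

/*
 * Illustrative sketch, not the kernel macro: atomically add 'inc'
 * to *ptr and return the old value of *ptr, choosing the XADD
 * operand width from sizeof(*ptr).
 */
#define xadd_sketch(ptr, inc)						\
({									\
	__typeof__(*(ptr)) __ret = (inc);				\
	switch (sizeof(*(ptr))) {					\
	case 2:	/* 2-byte ticket pair: NR_CPUS < 256 */			\
		asm volatile("lock; xaddw %w0, %1"			\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	case 4:	/* 4-byte ticket pair: NR_CPUS >= 256 */		\
		asm volatile("lock; xaddl %0, %1"			\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	}								\
	__ret;								\
})

With a helper shaped like this, the same source line compiles to lock xaddw on a 2-byte ticket pair and to lock xaddl on a 4-byte one, so only the ticket width, not the locking logic, has to care about NR_CPUS.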
arch/x86/include/asm/spinlock.h

@@ -54,26 +54,22 @@
  * save some instructions and make the code more elegant. There really isn't
  * much between them in performance though, especially as locks are out of line.
  */
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	register union {
-		struct __raw_tickets tickets;
-		unsigned short slock;
-	} inc = { .slock = 1 << TICKET_SHIFT };
+	register struct __raw_tickets inc = { .tail = 1 };
 
-	asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
-		      : "+Q" (inc), "+m" (lock->slock) : : "memory", "cc");
+	inc = xadd(&lock->tickets, inc);
 
 	for (;;) {
-		if (inc.tickets.head == inc.tickets.tail)
+		if (inc.head == inc.tail)
 			break;
 		cpu_relax();
-		inc.tickets.head = ACCESS_ONCE(lock->tickets.head);
+		inc.head = ACCESS_ONCE(lock->tickets.head);
 	}
 	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
+#if (NR_CPUS < 256)
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int tmp, new;
@@ -101,27 +97,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned inc = 1 << TICKET_SHIFT;
-	__ticket_t tmp;
-
-	asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
-		     : "+r" (inc), "+m" (lock->slock)
-		     : : "memory", "cc");
-
-	tmp = inc;
-	inc >>= TICKET_SHIFT;
-
-	for (;;) {
-		if ((__ticket_t)inc == tmp)
-			break;
-		cpu_relax();
-		tmp = ACCESS_ONCE(lock->tickets.head);
-	}
-	barrier();		/* make sure nothing creeps before the lock is taken */
-}
-
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned tmp;
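To see the algorithm the converted function implements without building a kernel, here is a minimal user-space sketch of the same ticket lock, under stated assumptions: my_tickets, my_ticket_lock and my_ticket_unlock are made-up names, GCC/Clang __atomic builtins stand in for xadd() and ACCESS_ONCE(), and __builtin_ia32_pause() stands in for cpu_relax() (x86 only). One simplification: the kernel xadds head and tail as a single word, while this sketch fetch-adds tail alone and loads head separately, an equally standard formulation of the same lock.

/* Illustrative user-space model of the ticket lock above; not kernel API. */
struct my_tickets {
	unsigned short head;	/* ticket now being served */
	unsigned short tail;	/* next ticket to hand out */
};

static void my_ticket_lock(struct my_tickets *lock)
{
	/* Grab the next ticket; plays the role of inc = xadd(&lock->tickets, inc). */
	unsigned short me = __atomic_fetch_add(&lock->tail, 1, __ATOMIC_ACQUIRE);

	/* Spin until our number is served, like the for (;;) loop above. */
	while (__atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != me)
		__builtin_ia32_pause();	/* user-space cpu_relax() */
}

static void my_ticket_unlock(struct my_tickets *lock)
{
	/* Serve the next ticket; release pairs with the acquire loads above. */
	__atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
}

Starting from an all-zero my_tickets, waiters acquire the lock in strict FIFO ticket order, which is the fairness property that motivated ticket locks over the old test-and-set spinlock.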