Commit 0e9f9fd2 authored by Stefan Kristiansson, committed by Stafford Horne

openrisc: add atomic bitops

This utilizes the load-link/store-conditional l.lwa and l.swa
instructions to implement the atomic bitops.
When those instructions are not available, emulation is provided.
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
[shorne@gmail.com: remove OPENRISC_HAVE_INST_LWA_SWA config suggested by
Alan Cox https://lkml.org/lkml/2014/7/23/666, implement
test_and_change_bit]
Signed-off-by: Stafford Horne <shorne@gmail.com>
parent 63104c06
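
The assembly below follows the load-link/store-conditional pattern: l.lwa loads a word and places a reservation on it, l.swa stores only if that reservation is still intact, and l.bnf branches back to retry on failure. As a minimal C sketch of the same retry loop, using GCC __atomic builtins purely for illustration (the SKETCH_ names are hypothetical; this is not the kernel's emulation path):

#include <limits.h>

#define SKETCH_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define SKETCH_BIT_WORD(nr)	((nr) / SKETCH_BITS_PER_LONG)
#define SKETCH_BIT_MASK(nr)	(1UL << ((nr) % SKETCH_BITS_PER_LONG))

/* Illustrative only: what test_and_set_bit's l.lwa/l.swa loop computes. */
static inline int sketch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = SKETCH_BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + SKETCH_BIT_WORD(nr);
	unsigned long old = __atomic_load_n(p, __ATOMIC_RELAXED);	/* l.lwa */

	/* l.swa succeeds only if no other store hit the word; l.bnf retries. */
	while (!__atomic_compare_exchange_n(p, &old, old | mask, 1,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;	/* on failure 'old' is refreshed, so just retry */

	return (old & mask) != 0;
}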
arch/openrisc/include/asm/bitops.h
@@ -45,7 +45,7 @@
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/atomic.h>
+#include <asm/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic.h>

arch/openrisc/include/asm/bitops/atomic.h (new file)
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_BITOPS_ATOMIC_H
#define __ASM_OPENRISC_BITOPS_ATOMIC_H

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long tmp;

	/*
	 * l.lwa loads the word and sets a reservation; l.swa stores only
	 * if the reservation still holds, and l.bnf loops back on failure.
	 */
	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"
		"	l.or	%0,%0,%2	\n"
		"	l.swa	0(%1),%0	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"
		"	l.and	%0,%0,%2	\n"
		"	l.swa	0(%1),%0	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(tmp)
		: "r"(p), "r"(~mask)
		: "cc", "memory");
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"
		"	l.xor	%0,%0,%2	\n"
		"	l.swa	0(%1),%0	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%2)	\n"
		"	l.or	%1,%0,%3	\n"
		"	l.swa	0(%2),%1	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");

	return (old & mask) != 0;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%2)	\n"
		"	l.and	%1,%0,%3	\n"
		"	l.swa	0(%2),%1	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(~mask)
		: "cc", "memory");

	return (old & mask) != 0;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%2)	\n"
		"	l.xor	%1,%0,%3	\n"
		"	l.swa	0(%2),%1	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");

	return (old & mask) != 0;
}

#endif /* __ASM_OPENRISC_BITOPS_ATOMIC_H */
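
As a quick usage sketch (a hypothetical caller, not part of this commit; 'flags' and the bit numbers are invented), these helpers follow the standard Linux bitops API:

static unsigned long flags;

static void example(void)
{
	set_bit(0, &flags);		/* atomically set bit 0 */
	clear_bit(1, &flags);		/* atomically clear bit 1 */
	change_bit(2, &flags);		/* atomically toggle bit 2 */

	if (test_and_set_bit(0, &flags)) {
		/* bit 0 was already set before this call */
	}
}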