Commit 8184c812, authored Jul 21, 2003 by Richard Henderson
[ALPHA] Add atomic64_t.
parent 4ded3272

Showing 2 changed files with 123 additions and 2 deletions:

  include/asm-alpha/atomic.h   +83  -2
  include/asm-alpha/local.h    +40  -0
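
For orientation, a minimal sketch of the API surface this commit adds. The counter name and calling code here are hypothetical, not part of the patch:

    #include <asm/atomic.h>

    /* Hypothetical 64-bit event counter; a 32-bit atomic_t could
     * wrap around, which is the usual motivation for atomic64_t. */
    static atomic64_t nr_events = ATOMIC64_INIT(0);

    static void record_event(void)
    {
            atomic64_inc(&nr_events);          /* atomic 64-bit increment */
    }

    static long events_so_far(void)
    {
            return atomic64_read(&nr_events);  /* plain 64-bit load */
    }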
include/asm-alpha/atomic.h
@@ -16,11 +16,16 @@
  * the user gave us, not some alias that contains the same information.
  */
 typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile long counter; } atomic64_t;
 
-#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
+#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
+#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )
 
-#define atomic_read(v)		((v)->counter)
+#define atomic_read(v)		((v)->counter + 0)
+#define atomic64_read(v)	((v)->counter + 0)
+
 #define atomic_set(v,i)		((v)->counter = (i))
+#define atomic64_set(v,i)	((v)->counter = (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
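
Worth noting: the hunk above also changes atomic_read() from ((v)->counter) to ((v)->counter + 0). Adding 0 presumably forces the expression to be an rvalue, so the macro cannot be misused as an assignment target; writes must go through atomic_set(). A hypothetical illustration:

    atomic64_t v = ATOMIC64_INIT(0);

    atomic64_set(&v, 5);             /* the intended write path */
    long n = atomic64_read(&v);      /* reads the counter as an rvalue */
    /* atomic64_read(&v) = 5; */     /* would no longer compile:
                                      * (counter + 0) is not an lvalue */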
@@ -43,6 +48,21 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 	:"Ir" (i), "m" (v->counter));
 }
 
+static __inline__ void atomic64_add(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
 	unsigned long temp;
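
The asm body is Alpha's load-locked/store-conditional pattern: ldq_l loads the counter and arms a reservation, stq_c succeeds only if no other CPU wrote the location in between, and on failure (%0 becomes 0) beq branches to an out-of-line retry placed in .subsection 2, keeping the hot path straight-line for branch prediction. Roughly equivalent lock-free C, using the GCC __sync builtin as a stand-in for ldq_l/stq_c (a sketch, not the patch's code):

    static void atomic64_add_sketch(long i, atomic64_t *v)
    {
            long old;
            do {
                    old = v->counter;   /* ldq_l: load with reservation */
                    /* stq_c: the store takes effect only if nobody raced us;
                     * otherwise loop, as "br 1b" does in the asm */
            } while (!__sync_bool_compare_and_swap(&v->counter, old, old + i));
    }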
@@ -58,6 +78,22 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 	:"Ir" (i), "m" (v->counter));
 }
 
+static __inline__ void atomic64_sub(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
 /*
  * Same as above, but return the result value
  */
@@ -79,6 +115,24 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 	return result;
 }
 
+static __inline__ long atomic64_add_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%3,%2\n"
+	"	addq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
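
Two details distinguish the _return variants: the sum is computed twice (addq %0,%3,%2 produces the return value in result, while addq %0,%3,%0 produces the value to store, since stq_c overwrites %0 with its success flag), and a successful store is followed by mb plus a "memory" clobber, so these functions act as full memory barriers. A hypothetical use that relies on the returned value:

    /* Hand out unique, monotonically increasing 64-bit sequence numbers. */
    static atomic64_t seq = ATOMIC64_INIT(0);

    static long next_seq(void)
    {
            return atomic64_add_return(1, &seq);   /* barrier included */
    }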
@@ -97,14 +151,41 @@ static __inline__ long atomic_sub_return(int i, atomic_t * v)
 	return result;
 }
 
+static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%3,%2\n"
+	"	subq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 
 #define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic64_inc_return(v) atomic64_add_return(1,(v))
 
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
 
 #define atomic_inc(v) atomic_add(1,(v))
+#define atomic64_inc(v) atomic64_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
+#define atomic64_dec(v) atomic64_sub(1,(v))
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
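
The derived macros mirror the existing 32-bit names one-for-one; atomic64_dec_and_test() supports the usual reference-count idiom. A hypothetical sketch (the struct and its use of kfree() are illustrative only):

    struct obj {
            atomic64_t refcnt;
            /* ... payload ... */
    };

    static void obj_put(struct obj *o)
    {
            /* Only the caller that drops the count to zero frees it. */
            if (atomic64_dec_and_test(&o->refcnt))
                    kfree(o);
    }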
include/asm-alpha/local.h (new file, mode 100644)

#ifndef _ARCH_SPARC64_LOCAL_H
#define _ARCH_SPARC64_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>

typedef atomic64_t local_t;

#define LOCAL_INIT(i)	ATOMIC64_INIT(i)
#define local_read(v)	atomic64_read(v)
#define local_set(v,i)	atomic64_set(v,i)

#define local_inc(v)	atomic64_inc(v)
#define local_dec(v)	atomic64_dec(v)
#define local_add(i, v)	atomic64_add(i, v)
#define local_sub(i, v)	atomic64_sub(i, v)

#define __local_inc(v)		((v)->counter++)
#define __local_dec(v)		((v)->counter--)
#define __local_add(i,v)	((v)->counter+=(i))
#define __local_sub(i,v)	((v)->counter-=(i))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 */
#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))

#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))

#endif /* _ARCH_SPARC64_LOCAL_H */
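
As the header's own comment says, the cpu_local_* wrappers take a per-cpu variable rather than an address. A minimal sketch of the intended use, with a hypothetical statistics counter:

    #include <linux/percpu.h>
    #include <asm/local.h>

    static DEFINE_PER_CPU(local_t, nr_irqs_handled) = LOCAL_INIT(0);

    static void count_irq(void)
    {
            /* Touches only this CPU's copy, so the atomic64_t-backed
             * local_t never bounces cache lines between CPUs. */
            cpu_local_inc(nr_irqs_handled);
    }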