nexedi / linux

Commit 87dbaaab authored Aug 29, 2005 by Tony Luck

Pull lameter-rwsem-limit into release branch

Parents: 7ee175f7, 16592d26
Showing 1 changed file with 18 additions and 17 deletions:

include/asm-ia64/rwsem.h  (+18, -17)
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
  * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
  *
  * Based on asm-i386/rwsem.h and other architecture implementation.
  *
@@ -11,9 +12,9 @@
  *
  * The lock count is initialized to 0 (no active and no waiting lockers).
  *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
- * of an uncontended lock. Readers increment by 1 and see a positive value
- * when uncontended, negative if there are writers (and maybe) readers
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
+ * the case of an uncontended lock. Readers increment by 1 and see a positive
+ * value when uncontended, negative if there are writers (and maybe) readers
  * waiting (in which case it goes to sleep).
  */
@@ -29,7 +30,7 @@
  * the semaphore definition
  */
 struct rw_semaphore {
-        signed int              count;
+        signed long             count;
         spinlock_t              wait_lock;
         struct list_head        wait_list;
 #if RWSEM_DEBUG
@@ -37,10 +38,10 @@ struct rw_semaphore {
 #endif
 };

-#define RWSEM_UNLOCKED_VALUE            0x00000000
-#define RWSEM_ACTIVE_BIAS               0x00000001
-#define RWSEM_ACTIVE_MASK               0x0000ffff
-#define RWSEM_WAITING_BIAS              (-0x00010000)
+#define RWSEM_UNLOCKED_VALUE            __IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS               __IA64_UL_CONST(0x0000000000000001)
+#define RWSEM_ACTIVE_MASK               __IA64_UL_CONST(0x00000000ffffffff)
+#define RWSEM_WAITING_BIAS              -__IA64_UL_CONST(0x0000000100000000)
 #define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
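Note (illustration only, not part of the patch): the updated comment earlier in this file says an uncontended writer ends up with a count of 0xffffffff00000001 while a single reader sees a small positive value. The minimal userspace sketch below plugs the new constants into that arithmetic; it assumes a 64-bit long and models __IA64_UL_CONST() as a plain unsigned-long literal, which is only an approximation of the kernel macro.

/* Standalone sketch (not kernel code): check the 64-bit count values
 * described in the comment above, assuming an LP64 system. */
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE     0x0000000000000000UL
#define RWSEM_ACTIVE_BIAS        0x0000000000000001UL
#define RWSEM_WAITING_BIAS       (-0x0000000100000000UL)
#define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        /* Uncontended writer: count goes from 0 to WAITING_BIAS + ACTIVE_BIAS. */
        long writer = (long)(RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS);
        /* Single uncontended reader: count goes from 0 to +1. */
        long reader = (long)(RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_READ_BIAS);

        printf("writer held: %#018lx (%s)\n", (unsigned long)writer,
               writer < 0 ? "negative" : "positive");
        printf("reader held: %#018lx (%s)\n", (unsigned long)reader,
               reader < 0 ? "negative" : "positive");
        return 0;
}

On an LP64 machine this should print 0xffffffff00000001 (negative) for the writer case and 0x0000000000000001 (positive) for the reader case, matching the figures in the updated comment.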
@@ -83,7 +84,7 @@ init_rwsem (struct rw_semaphore *sem)
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-        int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+        long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

         if (result < 0)
                 rwsem_down_read_failed(sem);
@@ -95,7 +96,7 @@ __down_read (struct rw_semaphore *sem)
 static inline void
 __down_write (struct rw_semaphore *sem)
 {
-        int old, new;
+        long old, new;

         do {
                 old = sem->count;
@@ -112,7 +113,7 @@ __down_write (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-        int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+        long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

         if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
                 rwsem_wake(sem);
@@ -124,7 +125,7 @@ __up_read (struct rw_semaphore *sem)
 static inline void
 __up_write (struct rw_semaphore *sem)
 {
-        int old, new;
+        long old, new;

         do {
                 old = sem->count;
@@ -141,7 +142,7 @@ __up_write (struct rw_semaphore *sem)
 static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
-        int tmp;
+        long tmp;
         while ((tmp = sem->count) >= 0) {
                 if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
                         return 1;
@@ -156,7 +157,7 @@ __down_read_trylock (struct rw_semaphore *sem)
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-        int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+        long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
                               RWSEM_ACTIVE_WRITE_BIAS);
         return tmp == RWSEM_UNLOCKED_VALUE;
 }
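Note (illustration only, not part of the patch): cmpxchg_acq() in the hunk above is the ia64 acquire-ordered compare-and-exchange, so the write-trylock is a single atomic attempt that succeeds only while the count is still RWSEM_UNLOCKED_VALUE and installs RWSEM_ACTIVE_WRITE_BIAS in the same step. A rough userspace analogue of that pattern using C11 atomics follows; the helper name and the constant definitions are local to this sketch, not kernel API, and a 64-bit long is assumed.

/* Rough C11 sketch of a one-shot write trylock; the real code uses the
 * ia64 cmpxchg8.acq instruction, not these library calls. */
#include <stdatomic.h>
#include <stdbool.h>

#define RWSEM_UNLOCKED_VALUE     0x0000000000000000L
#define RWSEM_ACTIVE_BIAS        0x0000000000000001L
#define RWSEM_WAITING_BIAS       (-0x0000000100000000L)
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static bool down_write_trylock_sketch(_Atomic long *count)
{
        long expected = RWSEM_UNLOCKED_VALUE;

        /* Succeeds only if nobody holds or waits for the lock (count == 0);
         * on success the count becomes RWSEM_ACTIVE_WRITE_BIAS atomically. */
        return atomic_compare_exchange_strong_explicit(
                count, &expected, RWSEM_ACTIVE_WRITE_BIAS,
                memory_order_acquire, memory_order_relaxed);
}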
@@ -167,7 +168,7 @@ __down_write_trylock (struct rw_semaphore *sem)
 static inline void
 __downgrade_write (struct rw_semaphore *sem)
 {
-        int old, new;
+        long old, new;

         do {
                 old = sem->count;
@@ -182,7 +183,7 @@ __downgrade_write (struct rw_semaphore *sem)
  * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
  * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
  */
-#define rwsem_atomic_add(delta, sem)    atomic_add(delta, (atomic_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_add(delta, sem)    atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))

 #endif /* _ASM_IA64_RWSEM_H */
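Note (illustration only, not part of the patch): with the count widened to 64 bits, these two helpers now map onto atomic64_add()/atomic64_add_return(). A userspace analogue of their semantics in C11 atomics is sketched below; the names are local to the sketch and a 64-bit long is assumed.

#include <stdatomic.h>

/* Counterpart of rwsem_atomic_add(): adjust the count, result unused. */
static void rwsem_atomic_add_sketch(long delta, _Atomic long *count)
{
        atomic_fetch_add_explicit(count, delta, memory_order_relaxed);
}

/* Counterpart of rwsem_atomic_update(): adjust the count and return the
 * new value (atomic_fetch_add returns the old value, hence the + delta). */
static long rwsem_atomic_update_sketch(long delta, _Atomic long *count)
{
        return atomic_fetch_add_explicit(count, delta, memory_order_relaxed) + delta;
}

Returning the post-add value mirrors atomic64_add_return(), which is the result the generic rw-semaphore slow path inspects after updating the count.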