nexedi / linux

Commit dcf82962, authored Aug 29, 2005 by Tony Luck
Pull lameter-spinlock-optimization into release branch
Parents: 02b3e4e2, f5210891
Showing 1 changed file with 24 additions and 9 deletions

include/asm-ia64/spinlock.h: +24, -9
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -93,7 +93,15 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 # endif /* CONFIG_MCKINLEY */
 #endif
 }
 
 #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
 
+/* Unlock by doing an ordered store and releasing the cacheline with nta */
+static inline void _raw_spin_unlock(spinlock_t *x) {
+	barrier();
+	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+}
+
 #else /* !ASM_SUPPORTED */
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 # define _raw_spin_lock(x)								\
@@ -109,16 +117,16 @@ do {											\
 		} while (ia64_spinlock_val);						\
 	}										\
 } while (0)
+#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
 #define spin_is_locked(x)	((x)->lock != 0)
-#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
 
 typedef struct {
-	volatile unsigned int read_counter	: 31;
-	volatile unsigned int write_lock	:  1;
+	volatile unsigned int read_counter	: 24;
+	volatile unsigned int write_lock	:  8;
 #ifdef CONFIG_PREEMPT
 	unsigned int break_lock;
 #endif
@@ -174,6 +182,13 @@ do {										\
 	(result == 0);								\
 })
 
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+	u8 *y = (u8 *)x;
+	barrier();
+	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
+}
+
 #else /* !ASM_SUPPORTED */
 
 #define _raw_write_lock(l)								\
@@ -195,14 +210,14 @@ do {										\
 	(ia64_val == 0);						\
 })
 
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+	barrier();
+	x->write_lock = 0;
+}
+
 #endif /* !ASM_SUPPORTED */
 
 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
-#define _raw_write_unlock(x)				\
-({							\
-	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
-	clear_bit(31, (x));				\
-})
-
 #endif /*  _ASM_IA64_SPINLOCK_H */
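
The rwlock_t bitfields above are re-split from 31/1 to 24/8 so that the write lock occupies the whole top byte of the 32-bit word. That is what lets the new _raw_write_unlock() replace the read-modify-write clear_bit(31, ...) with a single one-byte ordered store (st1.rel.nta to y+3). A minimal user-space sketch of that layout assumption, assuming a little-endian target with GCC-style LSB-first bitfield allocation as on Linux/ia64; demo_rwlock_t and the byte poke are illustrative only, not kernel code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the new rwlock_t field split (24/8). */
typedef struct {
	volatile unsigned int read_counter : 24;
	volatile unsigned int write_lock   :  8;
} demo_rwlock_t;

int main(void)
{
	demo_rwlock_t l = { .read_counter = 5, .write_lock = 1 };
	uint8_t *y = (uint8_t *)&l;

	/* On a little-endian target, write_lock occupies byte 3 of the
	 * 32-bit word; clearing that byte mirrors the patch's
	 * "st1.rel.nta [%0] = r0" with operand y+3. */
	y[3] = 0;

	assert(l.write_lock == 0);
	assert(l.read_counter == 5);   /* reader count is untouched */
	printf("write_lock=%u read_counter=%u\n", l.write_lock, l.read_counter);
	return 0;
}

Compiled with, say, gcc -O2, this prints write_lock=0 read_counter=5: clearing byte 3 releases the write lock without touching the reader count, which is the property the one-byte release store in the patch relies on.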