Commit ef146c9b authored Feb 13, 2004 by Linus Torvalds
Merge bk://kernel.bkbits.net/davem/sparc-2.6
into home.osdl.org:/home/torvalds/v2.5/linux

parents 8a81d818 5d869168
Showing 15 changed files with 231 additions and 209 deletions (+231, -209)
arch/sparc/kernel/irq.c            +60   -0
arch/sparc/kernel/process.c         +5   -4
arch/sparc/kernel/semaphore.c       +4   -4
arch/sparc/kernel/smp.c             +3   -0
arch/sparc/kernel/sparc_ksyms.c     +4   -8
arch/sparc/lib/atomic.S            +10  -10
include/asm-generic/local.h         +1   -1
include/asm-i386/atomic.h          +10  -20
include/asm-mips/atomic.h          +12  -24
include/asm-sparc/atomic.h         +79  -18
include/asm-sparc/dma-mapping.h    +21   -1
include/asm-sparc/processor.h       +0   -1
include/asm-sparc/semaphore.h       +6   -6
include/asm-sparc/system.h          +6  -92
include/asm-x86_64/atomic.h        +10  -20
arch/sparc/kernel/irq.c

@@ -52,6 +52,66 @@
 /* Used to protect the IRQ action lists */
 spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
 
+#ifdef CONFIG_SMP
+#define SMP_NOP2 "nop; nop;\n\t"
+#define SMP_NOP3 "nop; nop; nop;\n\t"
+#else
+#define SMP_NOP2
+#define SMP_NOP3
+#endif /* SMP */
+
+unsigned long __local_irq_save(void)
+{
+	unsigned long retval;
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"rd	%%psr, %0\n\t"
+		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
+		"or	%0, %2, %1\n\t"
+		"wr	%1, 0, %%psr\n\t"
+		"nop; nop; nop\n"
+		: "=&r" (retval), "=r" (tmp)
+		: "i" (PSR_PIL)
+		: "memory");
+
+	return retval;
+}
+
+void local_irq_enable(void)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"rd	%%psr, %0\n\t"
+		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
+		"andn	%0, %1, %0\n\t"
+		"wr	%0, 0, %%psr\n\t"
+		"nop; nop; nop\n"
+		: "=&r" (tmp)
+		: "i" (PSR_PIL)
+		: "memory");
+}
+
+void local_irq_restore(unsigned long old_psr)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"rd	%%psr, %0\n\t"
+		"and	%2, %1, %2\n\t"
+		SMP_NOP2	/* Sun4m + Cypress + SMP bug */
+		"andn	%0, %1, %0\n\t"
+		"wr	%0, %2, %%psr\n\t"
+		"nop; nop; nop\n"
+		: "=&r" (tmp)
+		: "i" (PSR_PIL), "r" (old_psr)
+		: "memory");
+}
+
+EXPORT_SYMBOL(__local_irq_save);
+EXPORT_SYMBOL(local_irq_enable);
+EXPORT_SYMBOL(local_irq_restore);
+
 /*
  * Dave Redman (djhr@tadpole.co.uk)
  *
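The three helpers above move the IRQ-flag manipulation out of line; the local_irq_save()/local_irq_restore() macros in include/asm-sparc/system.h (changed later in this diff) now expand to calls to them. A minimal usage sketch, not taken from this commit, with the surrounding driver code invented for illustration:

	unsigned long flags;

	local_irq_save(flags);		/* expands to flags = __local_irq_save() */
	/* ... touch data that this CPU's interrupt handlers also use ... */
	local_irq_restore(flags);	/* puts the saved PSR_PIL state back */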
arch/sparc/kernel/process.c

@@ -148,11 +148,12 @@ extern char reboot_command [];
 
 extern void (*prom_palette)(int);
 
+/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
 void machine_halt(void)
 {
-	sti();
+	local_irq_enable();
 	mdelay(8);
-	cli();
+	local_irq_disable();
 	if (!serial_console && prom_palette)
 		prom_palette(1);
 	prom_halt();
@@ -165,9 +166,9 @@ void machine_restart(char * cmd)
 {
 	char *p;
 
-	sti();
+	local_irq_enable();
 	mdelay(8);
-	cli();
+	local_irq_disable();
 
 	p = strchr(reboot_command, '\n');
 	if (p) *p = 0;
arch/sparc/kernel/semaphore.c

@@ -61,7 +61,7 @@ void __down(struct semaphore * sem)
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
@@ -101,7 +101,7 @@ int __down_interruptible(struct semaphore * sem)
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
+			atomic24_add(sleepers, &sem->count);
			break;
		}
@@ -111,7 +111,7 @@ int __down_interruptible(struct semaphore * sem)
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
@@ -146,7 +146,7 @@ int __down_trylock(struct semaphore * sem)
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
+	if (!atomic24_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
arch/sparc/kernel/smp.c

@@ -56,6 +56,9 @@ int smp_activated = 0;
 volatile int __cpu_number_map[NR_CPUS];
 volatile int __cpu_logical_map[NR_CPUS];
 cycles_t cacheflush_time = 0; /* XXX */
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+};
 
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
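The trailing comment is cut off in this view; it refers to ldstub, which atomically loads a byte and stores 0xff back to it. That is the primitive arch/sparc/lib/atomic.S and the hashed spinlocks above are built on. A minimal sketch of the byte-lock idea, assuming only what the comment states (this code and its names are not part of the commit):

	static inline void byte_lock(volatile unsigned char *lock)
	{
		unsigned char old;

		do {
			/* ldstub: load the old byte into "old" and atomically store 0xff */
			__asm__ __volatile__("ldstub [%1], %0"
					     : "=r" (old)
					     : "r" (lock)
					     : "memory");
		} while (old != 0);	/* 0 means the byte was free and is now ours */
	}

	static inline void byte_unlock(volatile unsigned char *lock)
	{
		__asm__ __volatile__("" : : : "memory");	/* compiler barrier */
		*lock = 0;
	}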
arch/sparc/kernel/sparc_ksyms.c

@@ -86,8 +86,8 @@ extern int __divdi3(int, int);
 extern void dump_thread(struct pt_regs *, struct user *);
 
 /* Private functions with odd calling conventions. */
-extern void ___atomic_add(void);
-extern void ___atomic_sub(void);
+extern void ___atomic24_add(void);
+extern void ___atomic24_sub(void);
 extern void ___set_bit(void);
 extern void ___clear_bit(void);
 extern void ___change_bit(void);
@@ -147,8 +147,8 @@ EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 EXPORT_SYMBOL(phys_base);
 
 /* Atomic operations. */
-EXPORT_SYMBOL(___atomic_add);
-EXPORT_SYMBOL(___atomic_sub);
+EXPORT_SYMBOL(___atomic24_add);
+EXPORT_SYMBOL(___atomic24_sub);
 
 /* Bit operations. */
 EXPORT_SYMBOL(___set_bit);
@@ -159,10 +159,6 @@ EXPORT_SYMBOL(___change_bit);
 
 /* IRQ implementation. */
 EXPORT_SYMBOL(global_irq_holder);
 EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
 
 /* Misc SMP information */
 EXPORT_SYMBOL(__cpu_number_map);
arch/sparc/lib/atomic.S

@@ -45,8 +45,8 @@ ___xchg32_sun4md:
 
	/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
	 * Really, some things here for SMP are overly clever, go read the header.
	 */
-	.globl	___atomic_add
-___atomic_add:
+	.globl	___atomic24_add
+___atomic24_add:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
@@ -56,13 +56,13 @@ ___atomic_add:
1:	ldstub	[%g1 + 3], %g7	! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0	! Did we get it?
	bne	1b		! Nope...
-	 ld	[%g1], %g7	! Load locked atomic_t
+	 ld	[%g1], %g7	! Load locked atomic24_t
	sra	%g7, 8, %g7	! Get signed 24-bit integer
	add	%g7, %g2, %g2	! Add in argument
-	sll	%g2, 8, %g7	! Transpose back to atomic_t
+	sll	%g2, 8, %g7	! Transpose back to atomic24_t
	st	%g7, [%g1]	! Clever: This releases the lock as well.
#else
-	ld	[%g1], %g7	! Load locked atomic_t
+	ld	[%g1], %g7	! Load locked atomic24_t
	add	%g7, %g2, %g2	! Add in argument
	st	%g2, [%g1]	! Store it back
#endif
@@ -71,8 +71,8 @@ ___atomic_add:
	jmpl	%o7, %g0	! NOTE: not + 8, see callers in atomic.h
	 mov	%g4, %o7	! Restore %o7.
 
-	.globl	___atomic_sub
-___atomic_sub:
+	.globl	___atomic24_sub
+___atomic24_sub:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
@@ -82,13 +82,13 @@ ___atomic_sub:
1:	ldstub	[%g1 + 3], %g7	! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0	! Did we get it?
	bne	1b		! Nope...
-	 ld	[%g1], %g7	! Load locked atomic_t
+	 ld	[%g1], %g7	! Load locked atomic24_t
	sra	%g7, 8, %g7	! Get signed 24-bit integer
	sub	%g7, %g2, %g2	! Subtract argument
-	sll	%g2, 8, %g7	! Transpose back to atomic_t
+	sll	%g2, 8, %g7	! Transpose back to atomic24_t
	st	%g7, [%g1]	! Clever: This releases the lock as well
#else
-	ld	[%g1], %g7	! Load locked atomic_t
+	ld	[%g1], %g7	! Load locked atomic24_t
	sub	%g7, %g2, %g2	! Subtract argument
	st	%g2, [%g1]	! Store it back
#endif
include/asm-generic/local.h

@@ -9,7 +9,7 @@
 /* An unsigned long type for operations which are atomic for a single
  * CPU. Usually used in combination with per-cpu variables. */
 
-#if BITS_PER_LONG == 32 && !defined(CONFIG_SPARC32)
+#if BITS_PER_LONG == 32
 /* Implement in terms of atomics. */
 
 /* Don't use typedef: don't want them to be mixed with atomic_t's. */
include/asm-i386/atomic.h

@@ -27,8 +27,7 @@ typedef struct { volatile int counter; } atomic_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)		((v)->counter)
@@ -37,8 +36,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)		(((v)->counter) = (i))
@@ -47,8 +45,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
@@ -63,8 +60,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
@@ -81,8 +77,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
@@ -99,8 +94,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
@@ -114,8 +108,7 @@ static __inline__ void atomic_inc(atomic_t *v)
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 static __inline__ void atomic_dec(atomic_t *v)
 {
@@ -131,8 +124,7 @@ static __inline__ void atomic_dec(atomic_t *v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
@@ -151,8 +143,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
@@ -172,8 +163,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {
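With the 24-bit caveat removed from the kernel-doc above (and from the mips and x86_64 headers below), atomic_t is documented as a full-width counter everywhere. A hedged usage sketch, not part of this patch, with the counter name invented:

	static atomic_t my_refcount = ATOMIC_INIT(1);

	static void get_ref(void)
	{
		atomic_inc(&my_refcount);		/* full 32-bit range is usable */
	}

	static void put_ref(void)
	{
		if (atomic_dec_and_test(&my_refcount)) {
			/* last reference gone; free the object here */
		}
	}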
include/asm-mips/atomic.h

@@ -29,8 +29,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)		((v)->counter)
@@ -46,8 +45,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)		((v)->counter = (i))
@@ -68,8 +66,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
@@ -85,8 +82,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
@@ -137,8 +133,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
@@ -158,8 +153,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
@@ -390,8 +384,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
@@ -412,8 +405,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
@@ -433,8 +425,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
@@ -452,8 +443,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 #define atomic_inc(v) atomic_add(1,(v))
@@ -469,8 +459,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  * atomic_dec - decrement and test
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 #define atomic_dec(v) atomic_sub(1,(v))
@@ -489,8 +478,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 #define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
include/asm-sparc/atomic.h

@@ -2,21 +2,82 @@
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
+ *
+ * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
+ * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
  */
 
 #ifndef __ARCH_SPARC_ATOMIC__
 #define __ARCH_SPARC_ATOMIC__
 
 #include <linux/config.h>
+#include <linux/spinlock.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
 
-#ifndef CONFIG_SMP
+#ifdef CONFIG_SMP
+#define ATOMIC_HASH_SIZE	4
+#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
+
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
+
+#else /* SMP */
+
+#define ATOMIC_HASH_SIZE	1
+#define ATOMIC_HASH(a)		0
+
+#endif /* SMP */
+
+static inline int __atomic_add_return(int i, atomic_t *v)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+	ret = (v->counter += i);
+
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+	v->counter = i;
+
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
 
 #define ATOMIC_INIT(i)	{ (i) }
 #define atomic_read(v)		((v)->counter)
-#define atomic_set(v, i)	(((v)->counter) = i)
+
+#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
+#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))
+
+#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
+#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
+#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))
+
+#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
+
+/* This is the old 24-bit implementation.  It's still used internally
+ * by some sparc-specific code, notably the semaphore implementation.
+ */
+typedef struct { volatile int counter; } atomic24_t;
+
+#ifndef CONFIG_SMP
+
+#define ATOMIC24_INIT(i)  { (i) }
+#define atomic24_read(v)          ((v)->counter)
+#define atomic24_set(v, i)        (((v)->counter) = i)
 
 #else
 /* We do the bulk of the actual work out of line in two common
@@ -33,9 +94,9 @@ typedef struct { volatile int counter; } atomic_t;
  * 31                          8 7      0
  */
 
-#define ATOMIC_INIT(i)	{ ((i) << 8) }
+#define ATOMIC24_INIT(i)	{ ((i) << 8) }
 
-static __inline__ int atomic_read(const atomic_t *v)
+static inline int atomic24_read(const atomic24_t *v)
 {
 	int ret = v->counter;
@@ -45,10 +106,10 @@ static __inline__ int atomic_read(const atomic_t *v)
 	return ret >> 8;
 }
 
-#define atomic_set(v, i)	(((v)->counter) = ((i) << 8))
+#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
 #endif
 
-static inline int __atomic_add(int i, atomic_t *v)
+static inline int __atomic24_add(int i, atomic24_t *v)
 {
 	register volatile int *ptr asm("g1");
 	register int increment asm("g2");
@@ -61,7 +122,7 @@ static inline int __atomic_add(int i, atomic_t *v)
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_add\n\t"
+	"call	___atomic24_add\n\t"
 	" add	%%o7, 8, %%o7\n"
 	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
 	: "0" (increment), "r" (ptr)
@@ -70,7 +131,7 @@ static inline int __atomic_add(int i, atomic_t *v)
 	return increment;
 }
 
-static inline int __atomic_sub(int i, atomic_t *v)
+static inline int __atomic24_sub(int i, atomic24_t *v)
 {
 	register volatile int *ptr asm("g1");
 	register int increment asm("g2");
@@ -83,7 +144,7 @@ static inline int __atomic_sub(int i, atomic_t *v)
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n"
 	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
 	: "0" (increment), "r" (ptr)
@@ -92,19 +153,19 @@ static inline int __atomic_sub(int i, atomic_t *v)
 	return increment;
 }
 
-#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
-#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))
+#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
+#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
 
-#define atomic_dec_return(v) __atomic_sub(1, (v))
-#define atomic_inc_return(v) __atomic_add(1, (v))
+#define atomic24_dec_return(v) __atomic24_sub(1, (v))
+#define atomic24_inc_return(v) __atomic24_add(1, (v))
 
-#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
-#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0)
+#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
+#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
 
-#define atomic_inc(v) ((void)__atomic_add(1, (v)))
-#define atomic_dec(v) ((void)__atomic_sub(1, (v)))
+#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
+#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
 
-#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)
+#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
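ATOMIC_HASH picks one of the spinlocks declared in arch/sparc/kernel/smp.c by hashing the counter's address. A worked example with invented addresses, assuming ATOMIC_HASH_SIZE == 4 as above:

	(0xf0012300 >> 8) & 3 = 0xf00123 & 3 = 3  ->  __atomic_hash[3]
	(0xf0012400 >> 8) & 3 = 0xf00124 & 3 = 0  ->  __atomic_hash[0]

Two unrelated atomic_t counters therefore usually contend on different locks, while every operation on the same counter always takes the same lock, which is what makes the read-modify-write in __atomic_add_return() atomic on SMP.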
include/asm-sparc/dma-mapping.h

 #ifndef _ASM_SPARC_DMA_MAPPING_H
 #define _ASM_SPARC_DMA_MAPPING_H
 
+#include <linux/config.h>
+
+#ifdef CONFIG_PCI
 #include <asm-generic/dma-mapping.h>
-#endif
+#else
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, int flag)
+{
+	BUG();
+	return NULL;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
+{
+	BUG();
+}
+
+#endif /* PCI */
+
+#endif /* _ASM_SPARC_DMA_MAPPING_H */
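A hedged usage sketch of the interface this header exposes when CONFIG_PCI is set (not part of the commit; the device pointer and buffer size are invented):

	void *cpu_buf;
	dma_addr_t bus_addr;

	cpu_buf = dma_alloc_coherent(dev, 4096, &bus_addr, GFP_KERNEL);
	if (cpu_buf) {
		/* hand bus_addr to the device, use cpu_buf from the CPU */
		dma_free_coherent(dev, 4096, cpu_buf, bus_addr);
	}
	/* Without CONFIG_PCI the stubs above simply BUG(), so a sparc32
	 * kernel built without PCI must never reach this code path. */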
include/asm-sparc/processor.h

@@ -22,7 +22,6 @@
 #include <asm/segment.h>
 #include <asm/btfixup.h>
 #include <asm/page.h>
-#include <asm/atomic.h>
 
 /*
  * Bus types
include/asm-sparc/semaphore.h

@@ -10,7 +10,7 @@
 #include <linux/rwsem.h>
 
 struct semaphore {
-	atomic_t count;
+	atomic24_t count;
 	int sleepers;
 	wait_queue_head_t wait;
 #if WAITQUEUE_DEBUG
@@ -40,7 +40,7 @@ struct semaphore {
 static inline void sema_init (struct semaphore *sem, int val)
 {
-	atomic_set(&sem->count, val);
+	atomic24_set(&sem->count, val);
 	sem->sleepers = 0;
 	init_waitqueue_head(&sem->wait);
 #if WAITQUEUE_DEBUG
@@ -78,7 +78,7 @@ static inline void down(struct semaphore * sem)
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"bl	2f\n\t"
@@ -115,7 +115,7 @@ static inline int down_interruptible(struct semaphore * sem)
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"bl	2f\n\t"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"bl	2f\n\t"
@@ -193,7 +193,7 @@ static inline void up(struct semaphore * sem)
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_add\n\t"
+	"call	___atomic24_add\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"ble	2f\n\t"
include/asm-sparc/system.h

@@ -171,32 +171,11 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 /*
  * Changing the IRQ level on the Sparc.
  */
-extern __inline__ void setipl(unsigned long __orig_psr)
-{
-	__asm__ __volatile__(
-		"wr	%0, 0x0, %%psr\n\t"
-		"nop; nop; nop\n"
-		: /* no outputs */
-		: "r" (__orig_psr)
-		: "memory", "cc");
-}
+extern void local_irq_restore(unsigned long);
+extern unsigned long __local_irq_save(void);
+extern void local_irq_enable(void);
 
-extern __inline__ void local_irq_enable(void)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-		"rd	%%psr, %0\n\t"
-		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
-		"andn	%0, %1, %0\n\t"
-		"wr	%0, 0x0, %%psr\n\t"
-		"nop; nop; nop\n"
-		: "=r" (tmp)
-		: "i" (PSR_PIL)
-		: "memory");
-}
-
-extern __inline__ unsigned long getipl(void)
+static inline unsigned long getipl(void)
 {
 	unsigned long retval;
@@ -204,76 +183,11 @@ extern __inline__ unsigned long getipl(void)
 	return retval;
 }
 
-#if 0 /* not used */
-extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
-{
-	unsigned long retval;
-
-	__asm__ __volatile__(
-		"rd	%%psr, %0\n\t"
-		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
-		"and	%0, %2, %%g1\n\t"
-		"and	%1, %2, %%g2\n\t"
-		"xorcc	%%g1, %%g2, %%g0\n\t"
-		"be	1f\n\t"
-		" nop\n\t"
-		"wr	%0, %2, %%psr\n\t"
-		"nop; nop; nop;\n"
-		"1:\n"
-		: "=&r" (retval)
-		: "r" (__new_psr), "i" (PSR_PIL)
-		: "g1", "g2", "memory", "cc");
-	return retval;
-}
-#endif
-
-extern __inline__ unsigned long read_psr_and_cli(void)
-{
-	unsigned long retval;
-
-	__asm__ __volatile__(
-		"rd	%%psr, %0\n\t"
-		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
-		"or	%0, %1, %%g1\n\t"
-		"wr	%%g1, 0x0, %%psr\n\t"
-		"nop; nop; nop\n\t"
-		: "=r" (retval)
-		: "i" (PSR_PIL)
-		: "g1", "memory");
-	return retval;
-}
-
 #define local_save_flags(flags)	((flags) = getipl())
-#define local_irq_save(flags)	((flags) = read_psr_and_cli())
-#define local_irq_restore(flags)	setipl((flags))
-#define local_irq_disable()	((void) read_psr_and_cli())
+#define local_irq_save(flags)	((flags) = __local_irq_save())
+#define local_irq_disable()	((void) __local_irq_save())
 
 #define irqs_disabled()		((getipl() & PSR_PIL) != 0)
 
 #ifdef CONFIG_SMP
 extern unsigned char global_irq_holder;
 
-#define save_and_cli(flags)	do { save_flags(flags); cli(); } while(0)
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long flags);
-#define cli()			__global_cli()
-#define sti()			__global_sti()
-#define save_flags(flags)	((flags)=__global_save_flags())
-#define restore_flags(flags)	__global_restore_flags(flags)
-
-#else
-
-#define cli() local_irq_disable()
-#define sti() local_irq_enable()
-
-#endif
-
 #endif
 
 /* XXX Change this if we ever use a PSO mode kernel. */
 #define mb()	__asm__ __volatile__ ("" : : : "memory")
 #define rmb()	mb()
include/asm-x86_64/atomic.h

@@ -29,8 +29,7 @@ typedef struct { volatile int counter; } atomic_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)		((v)->counter)
@@ -39,8 +38,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)		(((v)->counter) = (i))
@@ -49,8 +47,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
@@ -65,8 +62,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
@@ -83,8 +79,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
@@ -101,8 +96,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
@@ -116,8 +110,7 @@ static __inline__ void atomic_inc(atomic_t *v)
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 static __inline__ void atomic_dec(atomic_t *v)
 {
@@ -133,8 +126,7 @@ static __inline__ void atomic_dec(atomic_t *v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
@@ -153,8 +145,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
@@ -174,8 +165,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {