nexedi / linux

Commit 3d150630 authored Jun 13, 2009 by Mike Frysinger
Blackfin: convert locking primitives to asm-generic
Signed-off-by: Mike Frysinger <vapier@gentoo.org>

parent 22a151c1
Showing 6 changed files with 25 additions and 308 deletions
arch/blackfin/include/asm/atomic.h      +7  -102
arch/blackfin/include/asm/bitops.h      +9  -189
arch/blackfin/include/asm/mutex.h       +1  -1
arch/blackfin/include/asm/spinlock.h    +6  -0
arch/blackfin/include/asm/swab.h        +1  -5
arch/blackfin/include/asm/unaligned.h   +1  -11
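Each file below follows the same conversion: the hand-rolled uniprocessor code is dropped in favor of the asm-generic implementation, and the Blackfin-specific routines survive only under CONFIG_SMP. A minimal sketch of the idiom, using the atomic.h case from the first diff below:

/* Prefer the generic UP implementation; keep the custom SMP code. */
#ifndef CONFIG_SMP
# include <asm-generic/atomic.h>	/* generic version is sufficient on UP */
#else
/* Blackfin SMP-specific routines, e.g. __raw_uncached_fetch_asm(), live here */
#endif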
arch/blackfin/include/asm/atomic.h
#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__

#ifndef CONFIG_SMP
# include <asm-generic/atomic.h>
#else

#include <linux/types.h>
#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * Generally we do not concern about SMP BFIN systems, so we don't have
 * to deal with that.
 *
 * Tony Kou (tonyko@lineo.ca)   Lineo Inc.   2001
 */

#define ATOMIC_INIT(i)	{ (i) }
#define atomic_set(v, i)	(((v)->counter) = i)

#ifdef CONFIG_SMP

#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)

asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
...
@@ -84,100 +81,6 @@ static inline int atomic_test_mask(int mask, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#else /* !CONFIG_SMP */

#define atomic_read(v)	((v)->counter)
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter += i;
	local_irq_restore_hw(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter -= i;
	local_irq_restore_hw(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int __temp = 0;
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter += i;
	__temp = v->counter;
	local_irq_restore_hw(flags);

	return __temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp = 0;
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter -= i;
	__temp = v->counter;
	local_irq_restore_hw(flags);

	return __temp;
}

static inline void atomic_inc(volatile atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter++;
	local_irq_restore_hw(flags);
}

static inline void atomic_dec(volatile atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter--;
	local_irq_restore_hw(flags);
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter &= ~mask;
	local_irq_restore_hw(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter |= mask;
	local_irq_restore_hw(flags);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !CONFIG_SMP */

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))
...
@@ -210,4 +113,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#include <asm-generic/atomic-long.h>

#endif /* __ARCH_BLACKFIN_ATOMIC __ */

#endif
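On UP Blackfin the routines above get their atomicity by disabling hardware interrupts around a plain read-modify-write. Below is a minimal userspace model of atomic_add_return(), not kernel code: a pthread mutex stands in for local_irq_save_hw()/local_irq_restore_hw(), and atomic_t is redefined locally for the demo.

/* Userspace model of the UP atomic_add_return() above. The updated
 * counter must be read back inside the critical section, exactly as
 * the Blackfin code does with __temp. */
#include <pthread.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;	/* local stand-in */

static pthread_mutex_t irq_model = PTHREAD_MUTEX_INITIALIZER;

static int model_atomic_add_return(int i, atomic_t *v)
{
	int tmp;

	pthread_mutex_lock(&irq_model);		/* ~ local_irq_save_hw(flags) */
	v->counter += i;
	tmp = v->counter;			/* capture before "re-enabling" */
	pthread_mutex_unlock(&irq_model);	/* ~ local_irq_restore_hw(flags) */
	return tmp;
}

int main(void)
{
	atomic_t v = { 5 };

	/* atomic_dec_return(v) is atomic_sub_return(1, (v)), i.e. an
	 * add of -1 in this model: */
	printf("%d\n", model_atomic_add_return(-1, &v));	/* prints 4 */
	return 0;
}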
arch/blackfin/include/asm/bitops.h
#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef CONFIG_SMP
# include <asm-generic/bitops.h>
#else

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

#ifdef CONFIG_SMP

#include <linux/linkage.h>

asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
...
@@ -79,189 +75,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
}

#else /* !CONFIG_SMP */

#include <asm/system.h>	/* save_flags */

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	*a |= mask;
	local_irq_restore_hw(flags);
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	*a &= ~mask;
	local_irq_restore_hw(flags);
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save_hw(flags);
	*ADDR ^= mask;
	local_irq_restore_hw(flags);
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore_hw(flags);

	return retval;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore_hw(flags);

	return retval;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore_hw(flags);

	return retval;
}

#endif /* CONFIG_SMP */

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

static inline int __test_bit(int nr, const void *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#ifndef CONFIG_SMP
/*
 * This routine doesn't need irq save and restore ops in UP
 * context.
 */
static inline int test_bit(int nr, const void *addr)
{
	return __test_bit(nr, addr);
}
#endif
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
...
@@ -272,10 +92,10 @@ static inline int test_bit(int nr, const void *addr)
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* CONFIG_SMP */

#endif /* _BLACKFIN_BITOPS_H */
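Every bit helper above shares one addressing step: bit nr lives in 32-bit word nr >> 5, at position nr & 0x1f within that word. A standalone demonstration of the mapping (demo_set_bit is a hypothetical name mirroring the non-atomic __set_bit() above):

#include <stdio.h>

/* Same indexing as the Blackfin helpers: select the 32-bit word with
 * nr >> 5, then build the in-word mask from the low five bits of nr. */
static void demo_set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	demo_set_bit(37, bitmap);	/* 37 = 32 + 5: word 1, bit 5 */
	printf("word 1 = 0x%x\n", (unsigned)((int *)bitmap)[1]);	/* 0x20 */
	return 0;
}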
arch/blackfin/include/asm/mutex.h
...
@@ -10,7 +10,7 @@
 #define _ASM_MUTEX_H

 #ifndef CONFIG_SMP
-#include <asm-generic/mutex-dec.h>
+#include <asm-generic/mutex.h>
 #else

 static inline void
...
arch/blackfin/include/asm/spinlock.h
#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H

#ifndef CONFIG_SMP
# include <asm-generic/spinlock.h>
#else

#include <asm/atomic.h>

asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
...
@@ -86,4 +90,6 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif
#endif /* !__BFIN_SPINLOCK_H */
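_raw_read_relax() and _raw_write_relax() resolve to cpu_relax(), the hint issued while spinning on a held lock. A userspace sketch of that busy-wait shape, assuming GCC's __sync builtins as stand-ins for the __raw_spin_*_asm routines declared above (illustrative, not the Blackfin implementation):

#include <stdio.h>

static volatile int lock_word;	/* 0 = unlocked, 1 = held */

static void relax(void)
{
	/* cpu_relax() would lower power / ease the pipeline here */
}

static void spin_lock_sketch(volatile int *lock)
{
	/* atomically read the old value and set the lock word */
	while (__sync_lock_test_and_set(lock, 1))
		relax();	/* back off while someone else holds it */
}

static void spin_unlock_sketch(volatile int *lock)
{
	__sync_lock_release(lock);	/* store 0 with release semantics */
}

int main(void)
{
	spin_lock_sketch(&lock_word);
	puts("locked");
	spin_unlock_sketch(&lock_word);
	puts("unlocked");
	return 0;
}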
arch/blackfin/include/asm/swab.h
...
@@ -2,11 +2,7 @@
#define _BLACKFIN_SWAB_H

#include <linux/types.h>
#include <linux/compiler.h>

#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __SWAB_64_THRU_32__
#endif

#include <asm-generic/swab.h>

#ifdef __GNUC__
...
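Defining __SWAB_64_THRU_32__ before including asm-generic/swab.h asks the generic code to build 64-bit byte swaps from two 32-bit swaps, a sensible choice for a 32-bit core like Blackfin. A standalone illustration of that construction (my own helper names, not the kernel's exact code):

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32_demo(uint32_t x)
{
	return (x << 24) | ((x & 0xff00) << 8) |
	       ((x >> 8) & 0xff00) | (x >> 24);
}

/* 64-bit swap through 32-bit swaps: swap the two halves, then byte-swap
 * each half. This is the construction __SWAB_64_THRU_32__ selects. */
static uint64_t swab64_thru_32_demo(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	uint32_t lo = (uint32_t)x;

	return ((uint64_t)swab32_demo(lo) << 32) | swab32_demo(hi);
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)
	       swab64_thru_32_demo(0x0102030405060708ULL));
	/* prints 0807060504030201 */
	return 0;
}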
arch/blackfin/include/asm/unaligned.h
#ifndef _ASM_BLACKFIN_UNALIGNED_H
#define _ASM_BLACKFIN_UNALIGNED_H

#include <linux/unaligned/le_struct.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>

#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif /* _ASM_BLACKFIN_UNALIGNED_H */

#include <asm-generic/unaligned.h>
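The old header wired Blackfin's accessors to the little-endian variants; the byteshift technique it relied on assembles a value one byte at a time, so the access is legal at any alignment. Shown as standalone C (get_unaligned_le32_demo is a hypothetical name mirroring the kernel's get_unaligned_le32()):

#include <stdio.h>
#include <stdint.h>

/* Byteshift load: build a 32-bit little-endian value from individual
 * bytes, so no aligned 32-bit load is ever issued. */
static uint32_t get_unaligned_le32_demo(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	uint8_t buf[5] = { 0xff, 0x78, 0x56, 0x34, 0x12 };

	/* offset 1 is misaligned for a plain uint32_t load */
	printf("0x%08x\n", get_unaligned_le32_demo(buf + 1));	/* 0x12345678 */
	return 0;
}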