Commit b2347fad authored by Joe Perches, committed by Ingo Molnar

include/asm-x86/mutex_32.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9969b440
@@ -21,22 +21,20 @@
  * wasn't 1 originally. This function MUST leave the value lower than 1
  * even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)                          \
-do {                                                                   \
-        unsigned int dummy;                                            \
-                                                                       \
-        typecheck(atomic_t *, count);                                  \
-        typecheck_fn(void (*)(atomic_t *), fail_fn);                   \
-                                                                       \
-        __asm__ __volatile__(                                          \
-                LOCK_PREFIX "   decl (%%eax)    \n"                    \
-                        "   jns 1f      \n"                            \
-                        "   call "#fail_fn"     \n"                    \
-                "1:     \n"                                            \
-                                                                       \
-                :"=a" (dummy)                                          \
-                : "a" (count)                                          \
-                : "memory", "ecx", "edx");                             \
+#define __mutex_fastpath_lock(count, fail_fn)                  \
+do {                                                           \
+        unsigned int dummy;                                    \
+                                                               \
+        typecheck(atomic_t *, count);                          \
+        typecheck_fn(void (*)(atomic_t *), fail_fn);           \
+                                                               \
+        asm volatile(LOCK_PREFIX "   decl (%%eax)\n"           \
+                     "   jns 1f\n"                             \
+                     "   call " #fail_fn "\n"                  \
+                     "1:\n"                                    \
+                     : "=a" (dummy)                            \
+                     : "a" (count)                             \
+                     : "memory", "ecx", "edx");                \
 } while (0)
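For readers decoding the assembly: LOCK decl atomically decrements the count and sets the sign flag when the result goes negative, so jns skips the slowpath only on the uncontended 1 -> 0 transition. In kernels of this vintage the generic fallback (include/asm-generic/mutex-dec.h) expresses the same contract in plain C; a minimal sketch of that form, assuming the kernel's atomic_t API:

/* Sketch of the generic (non-asm) form of the same fastpath; the
 * x86-32 version keeps it in inline asm so fail_fn is reached via a
 * direct "call" with the counter address still live in %eax. */
static inline void __mutex_fastpath_lock(atomic_t *count,
                                         void (*fail_fn)(atomic_t *))
{
        /* 1 -> 0: uncontended, done; any other start value goes negative */
        if (unlikely(atomic_dec_return(count) < 0))
                fail_fn(count);         /* contended: take the slowpath */
}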
@@ -50,8 +48,8 @@ do { \
  * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+                                               int (*fail_fn)(atomic_t *))
 {
         if (unlikely(atomic_dec_return(count) < 0))
                 return fail_fn(count);
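__mutex_fastpath_lock_retval() is the variant used when the caller needs an error code back, e.g. for interruptible sleeps. A caller sketch, after kernel/mutex.c of this period (the slowpath function name is the kernel's; treat the exact body as illustrative):

int __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval(&lock->count,
                                __mutex_lock_interruptible_slowpath);
}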
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)                        \
-do {                                                                   \
-        unsigned int dummy;                                            \
-                                                                       \
-        typecheck(atomic_t *, count);                                  \
-        typecheck_fn(void (*)(atomic_t *), fail_fn);                   \
-                                                                       \
-        __asm__ __volatile__(                                          \
-                LOCK_PREFIX "   incl (%%eax)    \n"                    \
-                        "   jg 1f       \n"                            \
-                        "   call "#fail_fn"     \n"                    \
-                "1:     \n"                                            \
-                                                                       \
-                :"=a" (dummy)                                          \
-                : "a" (count)                                          \
-                : "memory", "ecx", "edx");                             \
+#define __mutex_fastpath_unlock(count, fail_fn)                \
+do {                                                           \
+        unsigned int dummy;                                    \
+                                                               \
+        typecheck(atomic_t *, count);                          \
+        typecheck_fn(void (*)(atomic_t *), fail_fn);           \
+                                                               \
+        asm volatile(LOCK_PREFIX "   incl (%%eax)\n"           \
+                     "   jg 1f\n"                              \
+                     "   call " #fail_fn "\n"                  \
+                     "1:\n"                                    \
+                     : "=a" (dummy)                            \
+                     : "a" (count)                             \
+                     : "memory", "ecx", "edx");                \
 } while (0)
 
 #define __mutex_slowpath_needs_to_unlock()      1
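The unlock fastpath mirrors the lock: LOCK incl bumps the count and jg skips the slowpath only when the new value is strictly positive (no waiters queued). As the generic slowpath of this era reads it, __mutex_slowpath_needs_to_unlock() returning 1 tells kernel/mutex.c that the slowpath must still set the count back to 1 itself. A sketch of the generic (non-asm) form, again following asm-generic/mutex-dec.h:

/* Release the lock; if the count did not become strictly positive,
 * someone is queued, so call fail_fn() to wake waiters. */
static inline void __mutex_fastpath_unlock(atomic_t *count,
                                           void (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_inc_return(count) <= 0))
                fail_fn(count);         /* waiters present: wake them */
}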
@@ -104,8 +100,8 @@ do { \
  * Additionally, if the value was < 0 originally, this function must not leave
  * it to 0 on failure.
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+                                           int (*fail_fn)(atomic_t *))
 {
         /*
          * We have two variants here. The cmpxchg based one is the best one
...
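The trylock body is truncated above; its comment refers to a cmpxchg-based variant. As a sketch of that idea (the helper name is hypothetical, not the file's elided code): trylock succeeds only when it can atomically flip the count from 1 (unlocked) to 0, so a failed attempt never disturbs the counter and cannot induce a false contention state.

/* cmpxchg-style trylock sketch; atomic_cmpxchg(v, old, new)
 * returns the value previously held in *v. */
static inline int mutex_trylock_sketch(atomic_t *count)
{
        /* Succeeds only on the exact 1 -> 0 transition; on failure
         * the count is untouched, so no false contention is recorded. */
        return atomic_cmpxchg(count, 1, 0) == 1;
}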