Commit 61d73e4f authored by Ingo Molnar

x86/percpu: Clean up <asm/percpu.h> a bit

 - Fix misc typos

 - There are 4 variants of the same spelling in use right now:

     'per-CPU', 'per CPU', 'percpu' and 'per-cpu'

   Standardize on 'per-CPU' only.

 - s/makes gcc load/makes the compiler load/

 - Instead of:

     #ifdef CONFIG_XXXX
     #define YYYY FOO
     #else
     #define YYYY BAR
     #endif

   Use the slightly more readable form of:

     #ifdef CONFIG_XXXX
     # define YYYY FOO
     #else
     # define YYYY BAR
     #endif

 - Standardize & expand '#else' and '#endif' comments

 - Fix comment style

 - Capitalize x86 instruction names in comments

No change in code.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Uros Bizjak <ubizjak@gmail.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
parent 47c9dbd2
@@ -3,30 +3,30 @@
 #define _ASM_X86_PERCPU_H

 #ifdef CONFIG_X86_64
-#define __percpu_seg gs
-#define __percpu_rel (%rip)
+# define __percpu_seg gs
+# define __percpu_rel (%rip)
 #else
-#define __percpu_seg fs
-#define __percpu_rel
+# define __percpu_seg fs
+# define __percpu_rel
 #endif

 #ifdef __ASSEMBLY__

 #ifdef CONFIG_SMP
-#define __percpu %__percpu_seg:
+# define __percpu %__percpu_seg:
 #else
-#define __percpu
+# define __percpu
 #endif

 #define PER_CPU_VAR(var) __percpu(var)__percpu_rel

 #ifdef CONFIG_X86_64_SMP
-#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+# define INIT_PER_CPU_VAR(var) init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var) var
+# define INIT_PER_CPU_VAR(var) var
 #endif

-#else /* ...!ASSEMBLY */
+#else /* !__ASSEMBLY__: */

 #include <linux/build_bug.h>
 #include <linux/stringify.h>

@@ -37,19 +37,19 @@
 #ifdef CONFIG_CC_HAS_NAMED_AS

 #ifdef __CHECKER__
-#define __seg_gs __attribute__((address_space(__seg_gs)))
-#define __seg_fs __attribute__((address_space(__seg_fs)))
+# define __seg_gs __attribute__((address_space(__seg_gs)))
+# define __seg_fs __attribute__((address_space(__seg_fs)))
 #endif

 #ifdef CONFIG_X86_64
-#define __percpu_seg_override __seg_gs
+# define __percpu_seg_override __seg_gs
 #else
-#define __percpu_seg_override __seg_fs
+# define __percpu_seg_override __seg_fs
 #endif

 #define __percpu_prefix ""

-#else /* CONFIG_CC_HAS_NAMED_AS */
+#else /* !CONFIG_CC_HAS_NAMED_AS: */

 #define __percpu_seg_override
 #define __percpu_prefix "%%"__stringify(__percpu_seg)":"

@@ -80,7 +80,8 @@
 #define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel

-#else /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
+
 #define __percpu_seg_override
 #define __percpu_prefix ""
 #define __force_percpu_prefix ""

@@ -96,7 +97,7 @@
 #define __force_percpu_arg(x) __force_percpu_prefix "%" #x

 /*
- * Initialized pointers to per-cpu variables needed for the boot
+ * Initialized pointers to per-CPU variables needed for the boot
  * processor need to use these macros to get the proper address
  * offset from __per_cpu_load on SMP.
  *

@@ -106,13 +107,15 @@
 	extern typeof(var) init_per_cpu_var(var)

 #ifdef CONFIG_X86_64_SMP
-#define init_per_cpu_var(var) init_per_cpu__##var
+# define init_per_cpu_var(var) init_per_cpu__##var
 #else
-#define init_per_cpu_var(var) var
+# define init_per_cpu_var(var) var
 #endif

-/* For arch-specific code, we can use direct single-insn ops (they
- * don't give an lvalue though). */
+/*
+ * For arch-specific code, we can use direct single-insn ops (they
+ * don't give an lvalue though).
+ */

 #define __pcpu_type_1 u8
 #define __pcpu_type_2 u16

@@ -158,7 +161,7 @@ do { \
 #define __raw_cpu_read_const(pcp) __raw_cpu_read(, , pcp)

-#else /* CONFIG_USE_X86_SEG_SUPPORT */
+#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

 #define __raw_cpu_read(size, qual, _var) \
 ({ \

@@ -183,7 +186,7 @@ do { \
 } while (0)

 /*
- * The generic per-cpu infrastrucutre is not suitable for
+ * The generic per-CPU infrastrucutre is not suitable for
  * reading const-qualified variables.
  */
 #define __raw_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })

@@ -219,7 +222,7 @@ do { \
 } while (0)

 /*
- * Generate a percpu add to memory instruction and optimize code
+ * Generate a per-CPU add to memory instruction and optimize code
  * if one is added or subtracted.
  */
 #define percpu_add_op(size, qual, var, val) \

@@ -266,9 +269,9 @@ do { \
 })

 /*
- * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
- * xchg is expensive due to the implied lock prefix. The processor
- * cannot prefetch cachelines if xchg is used.
+ * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
+ * XCHG is expensive due to the implied LOCK prefix. The processor
+ * cannot prefetch cachelines if XCHG is used.
  */
 #define this_percpu_xchg_op(_var, _nval) \
 ({ \

@@ -278,8 +281,8 @@ do { \
 })

 /*
- * cmpxchg has no such implied lock semantics as a result it is much
- * more efficient for cpu local operations.
+ * CMPXCHG has no such implied lock semantics as a result it is much
+ * more efficient for CPU-local operations.
  */
 #define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
 ({ \

@@ -314,6 +317,7 @@ do { \
 })

 #if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
+
 #define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \
 ({ \
 	union { \

@@ -374,7 +378,8 @@ do { \
 #define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
 #define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
-#endif
+
+#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

 #ifdef CONFIG_X86_64
 #define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval);

@@ -443,7 +448,8 @@ do { \
 #define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
 #define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
-#endif
+
+#endif /* CONFIG_X86_64 */

 #define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp)
 #define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp)

@@ -510,8 +516,8 @@ do { \
 #define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

 /*
- * Per cpu atomic 64 bit operations are only available under 64 bit.
- * 32 bit must fall back to generic operations.
+ * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
+ * 32-bit kernels must fall back to generic operations.
  */
 #ifdef CONFIG_X86_64
 #define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)

@@ -539,20 +545,23 @@ do { \
 #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

 #define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp)
-#else
-/* There is no generic 64 bit read stable operation for 32 bit targets. */
+
+#else /* !CONFIG_X86_64: */
+
+/* There is no generic 64-bit read stable operation for 32-bit targets. */
 #define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })

 #define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)
-#endif
+
+#endif /* CONFIG_X86_64 */

 #define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)

 /*
- * this_cpu_read() makes gcc load the percpu variable every time it is
- * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read() makes the compiler load the per-CPU variable every time
+ * it is accessed while this_cpu_read_stable() allows the value to be cached.
  * this_cpu_read_stable() is more efficient and can be used if its value
- * is guaranteed to be valid across cpus. The current users include
+ * is guaranteed to be valid across CPUs. The current users include
  * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
  * actually per-thread variables implemented as per-CPU variables and
  * thus stable for the duration of the respective task.

@@ -626,12 +635,12 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 #define early_per_cpu_ptr(_name) (_name##_early_ptr)
 #define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])

 #define early_per_cpu(_name, _cpu) \
 	*(early_per_cpu_ptr(_name) ? \
 	  &early_per_cpu_ptr(_name)[_cpu] : \
 	  &per_cpu(_name, _cpu))

-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */

 #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
 	DEFINE_PER_CPU(_type, _name) = _initvalue

@@ -651,6 +660,6 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 #define early_per_cpu_ptr(_name) NULL
 /* no early_per_cpu_map() */

-#endif /* !CONFIG_SMP */
+#endif /* CONFIG_SMP */

 #endif /* _ASM_X86_PERCPU_H */
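
For context, the accessors whose comments this patch touches are used roughly as in the following minimal kernel-code sketch. It is not part of this commit, and demo_counter and demo_bump() are invented names for illustration only:

    #include <linux/percpu.h>

    /* One private copy of this counter exists for each possible CPU. */
    static DEFINE_PER_CPU(unsigned long, demo_counter);

    static void demo_bump(void)
    {
    	unsigned long v;

    	/* this_cpu_read() reloads from per-CPU memory on every access. */
    	v = this_cpu_read(demo_counter);

    	/* Single-instruction add to this CPU's copy; no LOCK prefix needed. */
    	this_cpu_add(demo_counter, 1);

    	/* Plain store to this CPU's copy. */
    	this_cpu_write(demo_counter, v + 1);
    }

The x86-specific this_cpu_read_stable() variant discussed in the comments above would additionally let the compiler cache the loaded value, which is only safe for values such as pcpu_hot.current_task that cannot change for the duration of the current task.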