Commit 62326650 authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  alpha: fix x86.git merge build error
  ia64: on UP percpu variables are not small memory model
  x86: fix arch/x86/kernel/test_nx.c modular build bug
  s390: use generic percpu linux-2.6.git
  POWERPC: use generic per cpu
  ia64: use generic percpu
  SPARC64: use generic percpu
  percpu: change Kconfig to HAVE_SETUP_PER_CPU_AREA
  modules: fold percpu_modcopy into module.c
  x86: export copy_from_user_ll_nocache[_nozero]
  x86: fix duplicated TIF on 64-bit
parents 94ed294c c18d1250
...@@ -80,7 +80,7 @@ config GENERIC_TIME_VSYSCALL ...@@ -80,7 +80,7 @@ config GENERIC_TIME_VSYSCALL
bool bool
default y default y
config ARCH_SETS_UP_PER_CPU_AREA config HAVE_SETUP_PER_CPU_AREA
def_bool y def_bool y
config DMI config DMI
......
...@@ -940,14 +940,3 @@ module_arch_cleanup (struct module *mod) ...@@ -940,14 +940,3 @@ module_arch_cleanup (struct module *mod)
if (mod->arch.core_unw_table) if (mod->arch.core_unw_table)
unw_remove_unwind_table(mod->arch.core_unw_table); unw_remove_unwind_table(mod->arch.core_unw_table);
} }
#ifdef CONFIG_SMP
/*
 * percpu_modcopy - replicate a module's .data.percpu image into the
 * per-cpu area of every possible CPU.
 *
 * @pcpudst: base address of the module's per-cpu destination region;
 *           each CPU's copy lives at pcpudst + per_cpu_offset(cpu)
 * @src:     source image of the per-cpu section
 * @size:    number of bytes to copy for each CPU
 *
 * NOTE(review): iterates over all *possible* CPUs, not just online
 * ones — presumably so CPUs brought online later start with
 * initialized per-cpu data; confirm against CPU-hotplug expectations.
 * (This commit removes this ia64-private copy in favor of the
 * generic implementation folded into kernel/module.c.)
 */
void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
/* pcpudst is void *: pointer arithmetic here relies on the GNU
 * extension treating sizeof(void) as 1, standard in kernel code. */
for_each_possible_cpu(i) {
memcpy(pcpudst + per_cpu_offset(i), src, size);
}
}
#endif /* CONFIG_SMP */
...@@ -42,7 +42,7 @@ config GENERIC_HARDIRQS ...@@ -42,7 +42,7 @@ config GENERIC_HARDIRQS
bool bool
default y default y
config ARCH_SETS_UP_PER_CPU_AREA config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64 def_bool PPC64
config IRQ_PER_CPU config IRQ_PER_CPU
......
...@@ -66,7 +66,7 @@ config AUDIT_ARCH ...@@ -66,7 +66,7 @@ config AUDIT_ARCH
bool bool
default y default y
config ARCH_SETS_UP_PER_CPU_AREA config HAVE_SETUP_PER_CPU_AREA
def_bool y def_bool y
config ARCH_NO_VIRT_TO_BUS config ARCH_NO_VIRT_TO_BUS
......
...@@ -1328,6 +1328,11 @@ pgd_t swapper_pg_dir[2048]; ...@@ -1328,6 +1328,11 @@ pgd_t swapper_pg_dir[2048];
static void sun4u_pgprot_init(void); static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void); static void sun4v_pgprot_init(void);
/*
 * Dummy function: sparc64 performs its real per-cpu area setup later
 * in boot via real_setup_per_cpu_areas(), so the generic
 * setup_per_cpu_areas() hook is deliberately a no-op here.
 * NOTE(review): presumably needed because this architecture now
 * selects HAVE_SETUP_PER_CPU_AREA — confirm against the Kconfig
 * hunk elsewhere in this commit.
 */
void __init setup_per_cpu_areas(void)
{
}
void __init paging_init(void) void __init paging_init(void)
{ {
unsigned long end_pfn, pages_avail, shift, phys_base; unsigned long end_pfn, pages_avail, shift, phys_base;
......
...@@ -91,8 +91,13 @@ static noinline int test_address(void *address) ...@@ -91,8 +91,13 @@ static noinline int test_address(void *address)
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" ".section __ex_table,\"a\"\n"
" .align 8\n" " .align 8\n"
#ifdef CONFIG_X86_32
" .long 0b\n"
" .long 2b\n"
#else
" .quad 0b\n" " .quad 0b\n"
" .quad 2b\n" " .quad 2b\n"
#endif
".previous\n" ".previous\n"
: [rslt] "=r" (result) : [rslt] "=r" (result)
: [fake_code] "r" (address), [zero] "r" (0UL), "0" (result) : [fake_code] "r" (address), [zero] "r" (0UL), "0" (result)
......
...@@ -817,6 +817,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, ...@@ -817,6 +817,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
#endif #endif
return n; return n;
} }
EXPORT_SYMBOL(__copy_from_user_ll_nocache);
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
unsigned long n) unsigned long n)
...@@ -831,6 +832,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr ...@@ -831,6 +832,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
#endif #endif
return n; return n;
} }
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
/** /**
* copy_to_user: - Copy a block of data into user space. * copy_to_user: - Copy a block of data into user space.
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/compiler.h> #include <asm/compiler.h>
#include <asm/pgalloc.h>
#ifndef __EXTERN_INLINE #ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline #define __EXTERN_INLINE extern inline
......
...@@ -15,38 +15,20 @@ ...@@ -15,38 +15,20 @@
#include <linux/threads.h> #include <linux/threads.h>
#ifdef CONFIG_SMP
#ifdef HAVE_MODEL_SMALL_ATTRIBUTE #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__))) # define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__)))
#endif #endif
#define DECLARE_PER_CPU(type, name) \ #define __my_cpu_offset __ia64_per_cpu_var(local_per_cpu_offset)
extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
/*
* Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
* external routine, to avoid include-hell.
*/
#ifdef CONFIG_SMP
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])
/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void); extern void *per_cpu_init(void);
#else /* ! SMP */ #else /* ! SMP */
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu")))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var
#define per_cpu_init() (__phys_per_cpu_start) #define per_cpu_init() (__phys_per_cpu_start)
#endif /* SMP */ #endif /* SMP */
...@@ -57,7 +39,12 @@ extern void *per_cpu_init(void); ...@@ -57,7 +39,12 @@ extern void *per_cpu_init(void);
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient. * more efficient.
*/ */
#define __ia64_per_cpu_var(var) (per_cpu__##var) #define __ia64_per_cpu_var(var) per_cpu__##var
#include <asm-generic/percpu.h>
/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -13,37 +13,12 @@ ...@@ -13,37 +13,12 @@
#include <asm/paca.h> #include <asm/paca.h>
#define __per_cpu_offset(cpu) (paca[cpu].data_offset) #define __per_cpu_offset(cpu) (paca[cpu].data_offset)
#define __my_cpu_offset() get_paca()->data_offset #define __my_cpu_offset get_paca()->data_offset
#define per_cpu_offset(x) (__per_cpu_offset(x)) #define per_cpu_offset(x) (__per_cpu_offset(x))
/* var is in discarded region: offset to particular copy we want */ #endif /* CONFIG_SMP */
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) #endif /* __powerpc64__ */
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, local_paca->data_offset))
/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
for_each_possible_cpu(__i) \
memcpy((pcpudst)+__per_cpu_offset(__i), \
(src), (size)); \
} while (0)
extern void setup_per_cpu_areas(void);
#else /* ! SMP */
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var
#endif /* SMP */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
#else
#include <asm-generic/percpu.h> #include <asm-generic/percpu.h>
#endif
#endif /* _ASM_POWERPC_PERCPU_H_ */ #endif /* _ASM_POWERPC_PERCPU_H_ */
...@@ -13,49 +13,25 @@ ...@@ -13,49 +13,25 @@
*/ */
#if defined(__s390x__) && defined(MODULE) #if defined(__s390x__) && defined(MODULE)
#define __reloc_hide(var,offset) (*({ \ #define SHIFT_PERCPU_PTR(ptr,offset) (({ \
extern int simple_identifier_##var(void); \ extern int simple_identifier_##var(void); \
unsigned long *__ptr; \ unsigned long *__ptr; \
asm ( "larl %0,per_cpu__"#var"@GOTENT" \ asm ( "larl %0, %1@GOTENT" \
: "=a" (__ptr) : "X" (per_cpu__##var) ); \ : "=a" (__ptr) : "X" (ptr) ); \
(typeof(&per_cpu__##var))((*__ptr) + (offset)); })) (typeof(ptr))((*__ptr) + (offset)); }))
#else #else
#define __reloc_hide(var, offset) (*({ \ #define SHIFT_PERCPU_PTR(ptr, offset) (({ \
extern int simple_identifier_##var(void); \ extern int simple_identifier_##var(void); \
unsigned long __ptr; \ unsigned long __ptr; \
asm ( "" : "=a" (__ptr) : "0" (&per_cpu__##var) ); \ asm ( "" : "=a" (__ptr) : "0" (ptr) ); \
(typeof(&per_cpu__##var)) (__ptr + (offset)); })) (typeof(ptr)) (__ptr + (offset)); }))
#endif #endif
#ifdef CONFIG_SMP #define __my_cpu_offset S390_lowcore.percpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS]; #include <asm-generic/percpu.h>
#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
#define per_cpu_offset(x) (__per_cpu_offset[x])
/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
for_each_possible_cpu(__i) \
memcpy((pcpudst)+__per_cpu_offset[__i], \
(src), (size)); \
} while (0)
#else /* ! SMP */
#define __get_cpu_var(var) __reloc_hide(var,0)
#define __raw_get_cpu_var(var) __reloc_hide(var,0)
#define per_cpu(var,cpu) __reloc_hide(var,0)
#endif /* SMP */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
#endif /* __ARCH_S390_PERCPU__ */ #endif /* __ARCH_S390_PERCPU__ */
...@@ -7,7 +7,6 @@ register unsigned long __local_per_cpu_offset asm("g5"); ...@@ -7,7 +7,6 @@ register unsigned long __local_per_cpu_offset asm("g5");
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define setup_per_cpu_areas() do { } while (0)
extern void real_setup_per_cpu_areas(void); extern void real_setup_per_cpu_areas(void);
extern unsigned long __per_cpu_base; extern unsigned long __per_cpu_base;
...@@ -16,29 +15,14 @@ extern unsigned long __per_cpu_shift; ...@@ -16,29 +15,14 @@ extern unsigned long __per_cpu_shift;
(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
#define per_cpu_offset(x) (__per_cpu_offset(x)) #define per_cpu_offset(x) (__per_cpu_offset(x))
/* var is in discarded region: offset to particular copy we want */ #define __my_cpu_offset __local_per_cpu_offset
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
for_each_possible_cpu(__i) \
memcpy((pcpudst)+__per_cpu_offset(__i), \
(src), (size)); \
} while (0)
#else /* ! SMP */ #else /* ! SMP */
#define real_setup_per_cpu_areas() do { } while (0) #define real_setup_per_cpu_areas() do { } while (0)
#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var
#endif /* SMP */ #endif /* SMP */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name #include <asm-generic/percpu.h>
#endif /* __ARCH_SPARC64_PERCPU__ */ #endif /* __ARCH_SPARC64_PERCPU__ */
...@@ -123,8 +123,8 @@ static inline struct thread_info *stack_thread_info(void) ...@@ -123,8 +123,8 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_FREEZE 23 /* is freezing for suspend */ #define TIF_FREEZE 23 /* is freezing for suspend */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR 25 /* uses thread_struct.ds_area_msr */ #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS 26 /* record scheduling event timestamps */ #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
......
...@@ -9,10 +9,6 @@ ...@@ -9,10 +9,6 @@
#include <asm/percpu.h> #include <asm/percpu.h>
#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name) \ #define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) \ __attribute__((__section__(".data.percpu"))) \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment