Commit 3afc6202 authored by travis@sgi.com, committed by Ingo Molnar

SPARC64: use generic percpu

Sparc64 provides the base address of the currently executing processor's
per-cpu area in a global register (%g5).

Sparc64 also calculates the address of any per-cpu area from a base address
and a shift, instead of performing an array lookup.
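
For illustration only (not part of the patch): the sketch below contrasts the
base-plus-shift offset calculation sparc64 uses, visible as __per_cpu_offset()
in the header diff further down, with an offset-table lookup. It is plain
user-space C, and NR_CPUS, the shift value and all names are made up for the
example.

/* illustration only - not kernel code; hypothetical sizes and names */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS		4	/* hypothetical number of cpus         */
#define PER_CPU_SHIFT	12	/* hypothetical per-cpu area size (4K) */

static unsigned long per_cpu_base;		/* plays the role of __per_cpu_base */
static unsigned long per_cpu_offsets[NR_CPUS];	/* the array-lookup alternative     */

/* sparc64 style: pure arithmetic, no load from an offset table */
static unsigned long offset_by_shift(unsigned int cpu)
{
	return per_cpu_base + ((unsigned long)cpu << PER_CPU_SHIFT);
}

/* the array-lookup scheme the commit message refers to */
static unsigned long offset_by_lookup(unsigned int cpu)
{
	return per_cpu_offsets[cpu];
}

int main(void)
{
	unsigned int cpu;

	per_cpu_base = (unsigned long)malloc(NR_CPUS << PER_CPU_SHIFT);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_offsets[cpu] = offset_by_shift(cpu);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u: shift %#lx lookup %#lx\n", cpu,
		       offset_by_shift(cpu), offset_by_lookup(cpu));

	free((void *)per_cpu_base);
	return 0;
}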

Cc: David Miller <davem@davemloft.net>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 988c388a
arch/sparc64/mm/init.c
@@ -1328,6 +1328,11 @@ pgd_t swapper_pg_dir[2048];
 static void sun4u_pgprot_init(void);
 static void sun4v_pgprot_init(void);
 
+/* Dummy function */
+void __init setup_per_cpu_areas(void)
+{
+}
+
 void __init paging_init(void)
 {
 	unsigned long end_pfn, pages_avail, shift, phys_base;
include/asm-sparc64/percpu.h
@@ -7,7 +7,6 @@ register unsigned long __local_per_cpu_offset asm("g5");
 
 #ifdef CONFIG_SMP
 
-#define setup_per_cpu_areas()			do { } while (0)
 extern void real_setup_per_cpu_areas(void);
 
 extern unsigned long __per_cpu_base;
@@ -16,29 +15,14 @@ extern unsigned long __per_cpu_shift;
 	(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
-#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
-#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
-
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size)			\
-do {								\
-	unsigned int __i;					\
-	for_each_possible_cpu(__i)				\
-		memcpy((pcpudst)+__per_cpu_offset(__i),		\
-		       (src), (size));				\
-} while (0)
+#define __my_cpu_offset __local_per_cpu_offset
+
 #else /* ! SMP */
 
 #define real_setup_per_cpu_areas()		do { } while (0)
 
-#define per_cpu(var, cpu)			(*((void)cpu, &per_cpu__##var))
-#define __get_cpu_var(var)			per_cpu__##var
-#define __raw_get_cpu_var(var)			per_cpu__##var
-
 #endif /* SMP */
 
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+#include <asm-generic/percpu.h>
 
 #endif /* __ARCH_SPARC64_PERCPU__ */
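
The net effect of the header change is that sparc64 now supplies only the arch
hooks the generic asm-generic/percpu.h builds on (per_cpu_offset() /
__per_cpu_offset() and, on SMP, __my_cpu_offset mapped to the %g5 register
variable), while the common accessors come from the generic header. A
hypothetical usage sketch, not part of this patch, showing per-cpu callers,
which are unaffected by the conversion:

#include <linux/percpu.h>
#include <linux/smp.h>

/* hypothetical example variable, not from this patch */
static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_tick(void)
{
	/* local cpu: with this patch the offset comes from %g5 on sparc64 */
	__get_cpu_var(example_counter)++;
}

static unsigned long example_read(int cpu)
{
	/* remote cpu: offset is __per_cpu_base + (cpu << __per_cpu_shift) */
	return per_cpu(example_counter, cpu);
}

Dropping the duplicated per_cpu()/__get_cpu_var() definitions here leaves only
the offset machinery as sparc64-specific; the rest is shared with the other
architectures through the generic header.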