Commit a52ccf00 authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into home.osdl.org:/home/torvalds/v2.5/linux
parents 18a65429 900e7bd2
@@ -66,8 +66,7 @@ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
-drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ \
-arch/ia64/sn/
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
boot := arch/ia64/boot
......
@@ -720,7 +720,7 @@ acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
{
int vector = 0;
-if (acpi_madt->flags.pcat_compat && (gsi < 16))
+if (has_8259 && (gsi < 16))
return isa_irq_to_vector(gsi);
if (!iosapic_register_intr)
......
@@ -61,7 +61,17 @@ ENTRY(ia64_execve)
mov out2=in2 // envp
add out3=16,sp // regs
br.call.sptk.many rp=sys_execve
-.ret0: cmp4.ge p6,p7=r8,r0
+.ret0:
+#ifdef CONFIG_IA32_SUPPORT
+/*
+ * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
+ * from pt_regs.
+ */
+adds r16=PT(CR_IPSR)+16,sp
+;;
+ld8 r16=[r16]
+#endif
+cmp4.ge p6,p7=r8,r0
mov ar.pfs=loc1 // restore ar.pfs
sxt4 r8=r8 // return 64-bit result
;;
@@ -89,6 +99,12 @@ ENTRY(ia64_execve)
ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
+#ifdef CONFIG_IA32_SUPPORT
+tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
+movl loc0=ia64_ret_from_ia32_execve
+;;
+(p6) mov rp=loc0
+#endif
br.ret.sptk.many rp
END(ia64_execve)
@@ -688,7 +704,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
mov b7=r0 // clear b7
;;
(pUStk) st1 [r14]=r3
-movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
+addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
;;
mov r16=ar.bsp // get existing backing store pointer
srlz.i // ensure interruption collection is off
@@ -701,6 +717,19 @@ GLOBAL_ENTRY(ia64_leave_syscall)
br.cond.sptk.many rbs_switch
END(ia64_leave_syscall)
+#ifdef CONFIG_IA32_SUPPORT
+GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+PT_REGS_UNWIND_INFO(0)
+adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
+adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
+;;
+.mem.offset 0,0
+st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
+.mem.offset 8,0
+st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
+END(ia64_ret_from_ia32_execve)
+// fall through
+#endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(ia64_leave_kernel)
PT_REGS_UNWIND_INFO(0)
/*
@@ -841,7 +870,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
mov r16=ar.bsp // get existing backing store pointer
-movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
+addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
;;
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
(pKStk) br.cond.dpnt skip_rbs_switch
......
@@ -165,7 +165,7 @@ ENTRY(fsys_gettimeofday)
.altrp b6
.body
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-movl r3=THIS_CPU(cpu_info)
+addl r3=THIS_CPU(cpu_info),r0
mov.m r31=ar.itc // put time stamp into r31 (ITC) == now (35 cyc)
#ifdef CONFIG_SMP
@@ -177,7 +177,7 @@ ENTRY(fsys_gettimeofday)
movl r19=xtime // xtime is a timespec struct
ld8 r10=[r10] // r10 <- __per_cpu_offset[0]
-movl r21=THIS_CPU(cpu_info)
+addl r21=THIS_CPU(cpu_info),r0
;;
add r10=r21, r10 // r10 <- &cpu_data(time_keeper_id)
tbit.nz p8,p0 = r2, IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT
......
@@ -64,9 +64,10 @@ EXPORT_SYMBOL(ia64_pfn_valid);
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(cpu_info__per_cpu);
+EXPORT_SYMBOL(per_cpu__cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__per_cpu_offset);
+EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
#endif
EXPORT_SYMBOL(kernel_thread);
......
@@ -2,7 +2,7 @@
* This is where we statically allocate and initialize the initial
* task.
*
-* Copyright (C) 1999, 2002 Hewlett-Packard Co
+* Copyright (C) 1999, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
@@ -34,7 +34,7 @@ static union {
struct thread_info thread_info;
} s;
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem __attribute__((section(".data.init_task"))) = {{
+} init_task_mem asm ("init_task_mem") __attribute__((section(".data.init_task"))) = {{
.task = INIT_TASK(init_task_mem.s.task),
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}};
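The asm ("init_task_mem") annotation pins the variable's assembly-level symbol name, so the linker script and assembly code can refer to the init-task area by a stable name rather than whatever identifier the compiler would otherwise emit. A minimal editorial sketch of this GCC asm-label extension, with hypothetical names (not part of the patch):

/* "buf" is the C identifier, but the object file defines the symbol
 * "fixed_buf_sym", which a linker script or .S file can reference directly. */
static unsigned long buf[16] asm ("fixed_buf_sym")
	__attribute__((section(".data.init_task")));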
......
@@ -717,6 +717,7 @@ iosapic_parse_prt (void)
register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW,
IOSAPIC_LEVEL);
}
+entry->irq = vector;
snprintf(pci_id, sizeof(pci_id), "%02x:%02x:%02x[%c]",
entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin);
......
@@ -566,7 +566,7 @@ static struct vm_operations_struct pfm_vm_ops={
#define pfm_wait_task_inactive(t) wait_task_inactive(t)
-#define pfm_get_cpu_var(v) __get_cpu_var(v)
+#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
typedef irqreturn_t pfm_irq_handler_t;
#define PFM_IRQ_HANDLER_RET(v) do { \
......
@@ -42,7 +42,7 @@
(IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define IPSR_READ_MASK IPSR_WRITE_MASK
-#define PTRACE_DEBUG 1
+#define PTRACE_DEBUG 0
#if PTRACE_DEBUG
# define dprintk(format...) printk(format)
......
@@ -56,6 +56,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
#endif
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
@@ -709,6 +710,8 @@ cpu_init (void)
memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
+per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
}
}
cpu_data = __per_cpu_start + __per_cpu_offset[smp_processor_id()];
@@ -716,19 +719,18 @@ cpu_init (void)
cpu_data = __phys_per_cpu_start;
#endif /* !CONFIG_SMP */
-cpu_info = cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
-#ifdef CONFIG_NUMA
-cpu_info->node_data = get_node_data_ptr();
-#endif
get_max_cacheline_size();
/*
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called
* ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
* depends on the data returned by identify_cpu(). We break the dependency by
-* accessing cpu_data() the old way, through identity mapped space.
+* accessing cpu_data() through the canonical per-CPU address.
*/
+cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+#ifdef CONFIG_NUMA
+cpu_info->node_data = get_node_data_ptr();
+#endif
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
......
@@ -72,7 +72,7 @@ static volatile struct call_data_struct *call_data;
#define IPI_CPU_STOP 1
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU(__u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
static void
stop_this_cpu (void)
@@ -91,7 +91,7 @@ irqreturn_t
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
int this_cpu = get_cpu();
-unsigned long *pending_ipis = &__get_cpu_var(ipi_operation);
+unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
unsigned long ops;
/* Count this now; we may make a call that never returns. */
......
@@ -83,12 +83,11 @@ unsigned long
itc_get_offset (void)
{
unsigned long elapsed_cycles, lost = jiffies - wall_jiffies;
-unsigned long now, last_tick;
+unsigned long now = ia64_get_itc(), last_tick;
last_tick = (cpu_data(TIME_KEEPER_ID)->itm_next
- (lost + 1)*cpu_data(TIME_KEEPER_ID)->itm_delta);
-now = ia64_get_itc();
if (unlikely((long) (now - last_tick) < 0)) {
printk(KERN_ERR "CPU %d: now < last_tick (now=0x%lx,last_tick=0x%lx)!\n",
smp_processor_id(), now, last_tick);
......
@@ -124,7 +124,7 @@ subsys_initcall(pci_acpi_init);
/* Called by ACPI when it finds a new root bus. */
-static struct pci_controller *
+static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
struct pci_controller *controller;
@@ -138,7 +138,7 @@ alloc_pci_controller (int seg)
return controller;
}
-static int
+static int __devinit
alloc_resource (char *name, struct resource *root, unsigned long start, unsigned long end, unsigned long flags)
{
struct resource *res;
@@ -159,7 +159,7 @@ alloc_resource (char *name, struct resource *root, unsigned long start, unsigned
return 0;
}
-static u64
+static u64 __devinit
add_io_space (struct acpi_resource_address64 *addr)
{
u64 offset;
@@ -190,7 +190,7 @@ add_io_space (struct acpi_resource_address64 *addr)
return IO_SPACE_BASE(i);
}
-static acpi_status
+static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
unsigned int *windows = (unsigned int *) data;
@@ -211,7 +211,7 @@ struct pci_root_info {
char *name;
};
-static acpi_status
+static acpi_status __devinit
add_window (struct acpi_resource *res, void *data)
{
struct pci_root_info *info = (struct pci_root_info *) data;
@@ -252,7 +252,7 @@ add_window (struct acpi_resource *res, void *data)
return AE_OK;
}
-struct pci_bus *
+struct pci_bus * __devinit
pci_acpi_scan_root (struct acpi_device *device, int domain, int bus)
{
struct pci_root_info info;
......
int __attribute__ ((__model__ (__small__))) x;
@@ -2,6 +2,7 @@
#
# Check whether linker can handle cross-segment @segrel():
#
CPPFLAGS=""
CC=$1
OBJDUMP=$2
dir=$(dirname $0)
@@ -11,10 +12,17 @@ $CC -nostdlib -static -Wl,-T$dir/check-segrel.lds $dir/check-segrel.S -o $out
res=$($OBJDUMP --full --section .rodata $out | fgrep 000 | cut -f3 -d' ')
rm -f $out
if [ $res != 00000a00 ]; then
echo " -DHAVE_BUGGY_SEGREL"
CPPFLAGS="$CPPFLAGS -DHAVE_BUGGY_SEGREL"
cat >&2 <<EOF
warning: your linker cannot handle cross-segment segment-relative relocations.
please upgrade to a newer version (it is safe to use this linker, but
the kernel will be bigger than strictly necessary).
EOF
fi
+if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep -q 'attribute directive ignored'
+then
+CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
+fi
+rm -f $out
+echo $CPPFLAGS
@@ -9,7 +9,7 @@
* "int" types were carefully placed so as to ensure proper operation
* of the macros.
*
-* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+* Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/types.h>
@@ -21,11 +21,16 @@
* memory accesses are ordered.
*/
typedef struct { volatile __s32 counter; } atomic_t;
+typedef struct { volatile __s64 counter; } atomic64_t;
#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
#define atomic_read(v) ((v)->counter)
+#define atomic64_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
+#define atomic64_set(v,i) (((v)->counter) = (i))
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
@@ -37,7 +42,21 @@ ia64_atomic_add (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg("acq", v, old, old + i, sizeof(atomic_t)) != old);
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
return new;
}
+static __inline__ __s64
+ia64_atomic64_add (__s64 i, atomic64_t *v)
+{
+__s64 old, new;
+CMPXCHG_BUGCHECK_DECL
+do {
+CMPXCHG_BUGCHECK(v);
+old = atomic64_read(v);
+new = old + i;
+} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic64_t)) != old);
+return new;
+}
@@ -55,6 +74,20 @@ ia64_atomic_sub (int i, atomic_t *v)
return new;
}
+static __inline__ __s64
+ia64_atomic64_sub (__s64 i, atomic64_t *v)
+{
+__s64 old, new;
+CMPXCHG_BUGCHECK_DECL
+do {
+CMPXCHG_BUGCHECK(v);
+old = atomic64_read(v);
+new = old - i;
+} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic64_t)) != old);
+return new;
+}
#define atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
@@ -67,6 +100,18 @@ ia64_atomic_sub (int i, atomic_t *v)
: ia64_atomic_add(__ia64_aar_i, v); \
})
+#define atomic64_add_return(i,v) \
+({ \
+long __ia64_aar_i = (i); \
+(__builtin_constant_p(i) \
+&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+: ia64_atomic64_add(__ia64_aar_i, v); \
+})
/*
* Atomically add I to V and return TRUE if the resulting value is
* negative.
@@ -77,6 +122,12 @@ atomic_add_negative (int i, atomic_t *v)
return atomic_add_return(i, v) < 0;
}
+static __inline__ int
+atomic64_add_negative (long i, atomic64_t *v)
+{
+return atomic64_add_return(i, v) < 0;
+}
#define atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
@@ -89,18 +140,40 @@ atomic_add_negative (int i, atomic_t *v)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
+#define atomic64_sub_return(i,v) \
+({ \
+long __ia64_asr_i = (i); \
+(__builtin_constant_p(i) \
+&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+: ia64_atomic64_sub(__ia64_asr_i, v); \
+})
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) != 0)
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) != 0)
#define atomic_add(i,v) atomic_add_return((i), (v))
#define atomic_sub(i,v) atomic_sub_return((i), (v))
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
+#define atomic64_add(i,v) atomic64_add_return((i), (v))
+#define atomic64_sub(i,v) atomic64_sub_return((i), (v))
+#define atomic64_inc(v) atomic64_add(1, (v))
+#define atomic64_dec(v) atomic64_sub(1, (v))
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
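For context, a usage sketch of the new 64-bit interface (editorial example with a hypothetical counter, not part of the patch). The atomic64_* operations mirror the existing atomic_t API one-for-one; constant deltas of +/-1, 4, 8, or 16 compile to a single fetchadd8, while anything else falls back to the ia64_atomic64_add/sub cmpxchg loop:

#include <asm/atomic.h>

static atomic64_t bytes_sent = ATOMIC64_INIT(0);	/* hypothetical counter */

static void account(long n)
{
	atomic64_add(n, &bytes_sent);		/* variable delta: cmpxchg loop */
	atomic64_inc(&bytes_sent);		/* constant delta: single fetchadd8 */
	if (atomic64_dec_and_test(&bytes_sent))	/* true when the counter reaches zero */
		atomic64_set(&bytes_sent, 0);
}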
......
@@ -202,7 +202,7 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ARCH_DLINFO \
do { \
extern char __kernel_syscall_via_epc[]; \
-NEW_AUX_ENT(AT_SYSINFO, __kernel_syscall_via_epc); \
+NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
} while (0)
......
#ifndef _ASM_IA64_LOCAL_H
#define _ASM_IA64_LOCAL_H
/*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/percpu.h>
typedef struct {
atomic64_t val;
} local_t;
#define LOCAL_INIT(i) ((local_t) { { (i) } })
#define local_read(l) atomic64_read(&(l)->val)
#define local_set(l, i) atomic64_set(&(l)->val, i)
#define local_inc(l) atomic64_inc(&(l)->val)
#define local_dec(l) atomic64_dec(&(l)->val)
#define local_add(i, l) atomic64_add((i), &(l)->val)
#define local_sub(i, l) atomic64_sub((i), &(l)->val)
/* Non-atomic variants, i.e., preemption disabled and won't be touched in interrupt, etc. */
#define __local_inc(l) (++(l)->val.counter)
#define __local_dec(l) (--(l)->val.counter)
#define __local_add(i,l) ((l)->val.counter += (i))
#define __local_sub(i,l) ((l)->val.counter -= (i))
/*
* Use these for per-cpu local_t variables. Note they take a variable (eg. mystruct.foo),
* not an address.
*/
#define cpu_local_read(v) local_read(&__ia64_per_cpu_var(v))
#define cpu_local_set(v, i) local_set(&__ia64_per_cpu_var(v), (i))
#define cpu_local_inc(v) local_inc(&__ia64_per_cpu_var(v))
#define cpu_local_dec(v) local_dec(&__ia64_per_cpu_var(v))
#define cpu_local_add(i, v) local_add((i), &__ia64_per_cpu_var(v))
#define cpu_local_sub(i, v) local_sub((i), &__ia64_per_cpu_var(v))
/*
* Non-atomic increments, i.e., preemption disabled and won't be touched in interrupt,
* etc.
*/
#define __cpu_local_inc(v) __local_inc(&__ia64_per_cpu_var(v))
#define __cpu_local_dec(v) __local_dec(&__ia64_per_cpu_var(v))
#define __cpu_local_add(i, v) __local_add((i), &__ia64_per_cpu_var(v))
#define __cpu_local_sub(i, v) __local_sub((i), &__ia64_per_cpu_var(v))
#endif /* _ASM_IA64_LOCAL_H */
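A usage sketch for the new local_t type (editorial example, hypothetical names): a per-CPU event counter that is safe against interrupts on the local CPU without a lock, plus the cheaper non-atomic variant for contexts where no interrupt can touch the counter:

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, nr_events);	/* hypothetical per-CPU counter */

static void note_event(void)
{
	cpu_local_inc(nr_events);	/* 64-bit atomic on this CPU's copy: IRQ-safe */
}

static void note_event_noirq(void)
{
	__cpu_local_inc(nr_events);	/* plain increment: only when no interrupt can race */
}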
@@ -86,9 +86,10 @@ delayed_tlb_flush (void)
{
extern void local_flush_tlb_all (void);
-if (unlikely(__get_cpu_var(ia64_need_tlb_flush))) {
+if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
local_flush_tlb_all();
-__get_cpu_var(ia64_need_tlb_flush) = 0;
+__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
}
}
......
#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H
#include <linux/config.h>
#include <linux/compiler.h>
/*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
#ifdef __ASSEMBLY__
-#define THIS_CPU(var) (var##__per_cpu) /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */
#include <linux/config.h>
#include <linux/threads.h>
-extern unsigned long __per_cpu_offset[NR_CPUS];
+#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
+#else
+# define __SMALL_ADDR_AREA
+#endif
+#define DECLARE_PER_CPU(type, name) \
+extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
+/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) name##__per_cpu
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) name##__per_cpu
+__attribute__((__section__(".data.percpu"))) \
+__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-#define __get_cpu_var(var) (var##__per_cpu)
+/*
+ * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
+ * external routine, to avoid include-hell.
+ */
#ifdef CONFIG_SMP
-# define per_cpu(var, cpu) (*RELOC_HIDE(&var##__per_cpu, __per_cpu_offset[cpu]))
+extern unsigned long __per_cpu_offset[NR_CPUS];
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
+#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
+#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
-#else
-# define per_cpu(var, cpu) ((void)cpu, __get_cpu_var(var))
-#endif
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var##__per_cpu)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##__per_cpu)
+#else /* ! SMP */
+#define per_cpu(var, cpu) ((void)cpu, per_cpu__##var)
+#define __get_cpu_var(var) per_cpu__##var
+#endif /* SMP */
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+/* ia64-specific part: */
extern void setup_per_cpu_areas (void);
+/*
+ * Be extremely careful when taking the address of this variable! Due to virtual
+ * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * more efficient.
+ */
+#define __ia64_per_cpu_var(var) (per_cpu__##var)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PERCPU_H */
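To make the new split concrete, a sketch of the three access forms (editorial example, hypothetical variable): per_cpu() reaches any CPU's copy through __per_cpu_offset[], __get_cpu_var() reaches the executing CPU's copy through the new local_per_cpu_offset per-CPU variable, and __ia64_per_cpu_var() uses the canonical virtually-mapped copy, which is slightly cheaper but lives at a different address:

#include <asm/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);	/* hypothetical */

static void touch_counts(int cpu)
{
	per_cpu(demo_count, cpu)++;		/* any CPU's copy, via __per_cpu_offset[cpu] */
	__get_cpu_var(demo_count)++;		/* this CPU's copy, via local_per_cpu_offset */
	__ia64_per_cpu_var(demo_count)++;	/* canonical mapping: fastest, different address */
}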
@@ -191,10 +191,12 @@ struct cpuinfo_ia64 {
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
/*
* The "local" data pointer. It points to the per-CPU data of the currently executing
* The "local" data variable. It refers to the per-CPU data of the currently executing
* CPU, much like "current" points to the per-task data of the currently executing task.
* Do not use the address of local_cpu_data, since it will be different from
* cpu_data(smp_processor_id())!
*/
#define local_cpu_data (&__get_cpu_var(cpu_info))
#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
extern void identify_cpu (struct cpuinfo_ia64 *);
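The new comment's warning can be made concrete with a short editorial sketch (hypothetical function): both expressions below name the same per-CPU data, but through different mappings, so the two pointers compare unequal and must never be cached or compared:

#include <asm/processor.h>
#include <asm/smp.h>

static void address_pitfall(void)
{
	struct cpuinfo_ia64 *a = local_cpu_data;		/* canonical per-CPU mapping */
	struct cpuinfo_ia64 *b = cpu_data(smp_processor_id());	/* remapped per-CPU area */

	/* Field reads agree: a->itm_delta == b->itm_delta, yet a != b. */
}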
......
@@ -20,9 +20,9 @@
#include <asm/percpu.h>
/* 0xa000000000000000 - 0xa000000000000000+PERCPU_PAGE_SIZE remain unmapped */
-#define PERCPU_ADDR (0xa000000000000000 + PERCPU_PAGE_SIZE)
-#define GATE_ADDR (0xa000000000000000 + 2*PERCPU_PAGE_SIZE)
+#define GATE_ADDR (0xa000000000000000 + PERCPU_PAGE_SIZE)
#define KERNEL_START 0xa000000100000000
+#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
#ifndef __ASSEMBLY__
......
@@ -126,7 +126,7 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
-struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
+struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);
tlb->mm = mm;
/*
......