Commit 75b08514 authored by Linus Torvalds's avatar Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents b1d7a6ca 358e377e
On some platforms, so-called memory-mapped I/O is weakly ordered. On such
platforms, driver writers are responsible for ensuring that I/O writes to
memory-mapped addresses on their device arrive in the order intended. This is
typically done by reading a 'safe' device or bridge register, causing the I/O
chipset to flush pending writes to the device before any reads are posted. A
driver would usually use this technique immediately prior to the exit of a
critical section of code protected by spinlocks. This would ensure that
subsequent writes to I/O space arrived only after all prior writes (much like a
memory barrier op, mb(), only with respect to I/O).
A more concrete example from a hypothetical device driver:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
...
In the case above, the device may receive newval2 before it receives newval,
which could cause problems. Fixing it is easy enough though:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: (void)readl(safe_register); /* maybe a config register? */
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: (void)readl(safe_register); /* maybe a config register? */
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
Here, the reads from safe_register will cause the I/O chipset to flush any
pending writes before actually posting the read to the chipset, preventing
possible data corruption.
......@@ -401,6 +401,15 @@ config HUGETLB_PAGE_SIZE_256KB
endchoice
config IA64_PAL_IDLE
bool "Use PAL_HALT_LIGHT in idle loop"
---help---
Say Y here to enable use of PAL_HALT_LIGHT in the cpu_idle loop.
This allows the CPU to enter a low power state when idle. You
can enable CONFIG_IA64_PALINFO and check /proc/pal/cpu0/power_info
to see the power consumption and latency for this state. If you're
unsure your firmware supports it, answer N.
config SMP
bool "SMP support"
---help---
......
......@@ -14,6 +14,7 @@ export AWK
OBJCOPYFLAGS := --strip-all
LDFLAGS_vmlinux := -static
LDFLAGS_MODULE += -T arch/ia64/module.lds
AFLAGS_KERNEL := -mconstant-gp
EXTRA :=
......@@ -23,7 +24,7 @@ CFLAGS_KERNEL := -mconstant-gp
GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC))
GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP))
ifeq ($(GAS_STATUS),buggy)
$(error Sorry, you need a newer version of the assembler, one that is built from \
......@@ -50,11 +51,8 @@ core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp/zx1/ \
arch/ia64/hp/sim/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/kernel/ \
arch/ia64/sn/io/ \
arch/ia64/sn/io/sn2/ \
arch/ia64/sn/io/sn2/pcibr/ \
arch/ia64/sn/kernel/sn2/
core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
......
......@@ -9,7 +9,6 @@
#
targets-$(CONFIG_IA64_HP_SIM) += bootloader
targets-$(CONFIG_IA64_GENERIC) += bootloader
targets := vmlinux.bin vmlinux.gz $(targets-y)
quiet_cmd_cptotop = LN $@
......
......@@ -1497,7 +1497,7 @@ static int sba_proc_info(char *buf, char **start, off_t offset, int len)
ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
sprintf(buf, "%s rev %d.%d\n", "Hewlett Packard zx1 SBA",
sprintf(buf, "%s rev %d.%d\n", "Hewlett-Packard zx1 SBA",
((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF));
sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n", buf,
(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ total_pages);
......
......@@ -253,7 +253,7 @@ ia32_syscall_table:
data8 sys_umount /* recycled never used phys( */
data8 sys32_ni_syscall /* old lock syscall holder */
data8 sys32_ioctl
data8 sys32_fcntl /* 55 */
data8 compat_sys_fcntl /* 55 */
data8 sys32_ni_syscall /* old mpx syscall holder */
data8 sys_setpgid
data8 sys32_ni_syscall /* old ulimit syscall holder */
......@@ -419,7 +419,7 @@ ia32_syscall_table:
data8 sys_mincore
data8 sys_madvise
data8 sys_getdents64 /* 220 */
data8 sys32_fcntl64
data8 compat_sys_fcntl64
data8 sys_ni_syscall /* reserved for TUX */
data8 sys_ni_syscall /* reserved for Security */
data8 sys_gettid
......
......@@ -114,6 +114,7 @@ copy_siginfo_from_user32 (siginfo_t *to, siginfo_t32 *from)
int
copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
{
unsigned int addr;
int err;
if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t32)))
......@@ -148,6 +149,12 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
addr = (unsigned long) from->si_ptr;
err |= __put_user(addr, &to->si_ptr);
break;
/* case __SI_RT: This is not generated by the kernel as of now. */
}
}
......
......@@ -119,10 +119,8 @@ nargs (unsigned int arg, char **ap)
asmlinkage long
sys32_execve (char *filename, unsigned int argv, unsigned int envp,
int dummy3, int dummy4, int dummy5, int dummy6, int dummy7,
int stack)
struct pt_regs *regs)
{
struct pt_regs *regs = (struct pt_regs *)&stack;
unsigned long old_map_base, old_task_size, tssd;
char **av, **ae;
int na, ne, len;
......@@ -1701,7 +1699,7 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
return shmctl32(first, second, (void *)AA(ptr));
default:
return -EINVAL;
return -ENOSYS;
}
return -EINVAL;
}
......@@ -2156,26 +2154,23 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* no messing around with init! */
goto out;
goto out_tsk;
if (request == PTRACE_ATTACH) {
ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
goto out;
}
ret = -ESRCH;
if (!(child->ptrace & PT_PTRACED))
goto out;
if (child->state != TASK_STOPPED) {
if (request != PTRACE_KILL)
goto out;
goto out_tsk;
}
if (child->parent != current)
goto out;
ret = ptrace_check_attach(child, request == PTRACE_KILL);
if (ret < 0)
goto out_tsk;
switch (request) {
case PTRACE_PEEKTEXT:
......@@ -2185,12 +2180,12 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
ret = put_user(value, (unsigned int *) A(data));
else
ret = -EIO;
goto out;
goto out_tsk;
case PTRACE_POKETEXT:
case PTRACE_POKEDATA: /* write the word at location addr */
ret = ia32_poke(regs, child, addr, data);
goto out;
goto out_tsk;
case PTRACE_PEEKUSR: /* read word at addr in USER area */
ret = -EIO;
......@@ -2265,43 +2260,13 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
break;
}
out_tsk:
put_task_struct(child);
out:
unlock_kernel();
return ret;
}
extern asmlinkage long sys_fcntl (unsigned int fd, unsigned int cmd, unsigned long arg);
asmlinkage long
sys32_fcntl (unsigned int fd, unsigned int cmd, unsigned int arg)
{
	struct flock f;
	mm_segment_t saved_fs;
	long ret;

	switch (cmd) {
	case F_GETLK:
	case F_SETLK:
	case F_SETLKW:
		/*
		 * Lock commands take a struct flock pointer: translate the
		 * 32-bit layout into a kernel-side native flock, run the
		 * native sys_fcntl on it, and (for F_GETLK) translate the
		 * answer back out to user space.
		 */
		if (get_compat_flock(&f, (struct compat_flock *) A(arg)))
			return -EFAULT;
		saved_fs = get_fs();
		set_fs(KERNEL_DS);	/* allow sys_fcntl to take a kernel pointer */
		ret = sys_fcntl(fd, cmd, (unsigned long) &f);
		set_fs(saved_fs);
		if (cmd == F_GETLK && put_compat_flock(&f, (struct compat_flock *) A(arg)))
			return -EFAULT;
		return ret;
	default:
		/*
		 *  `sys_fcntl' lies about arg, for the F_SETOWN
		 *  sub-function  arg can have a negative value.
		 */
		return sys_fcntl(fd, cmd, arg);
	}
}
asmlinkage long sys_ni_syscall(void);
asmlinkage long
......@@ -2596,66 +2561,6 @@ sys32_setgroups16 (int gidsetsize, short *grouplist)
return ret;
}
/*
* Unfortunately, the x86 compiler aligns variables of type "long long" to a 4 byte boundary
* only, which means that the x86 version of "struct flock64" doesn't match the ia64 version
* of struct flock.
*/
/*
 * Copy a native struct flock out to user space in the packed ia32
 * layout (x86 aligns "long long" to 4 bytes, so the field offsets
 * differ from the ia64 struct flock).
 */
static inline long
ia32_put_flock (struct flock *l, unsigned long addr)
{
	long err = 0;

	err |= put_user(l->l_type, (short *) addr);
	err |= put_user(l->l_whence, (short *) (addr + 2));
	err |= put_user(l->l_start, (long *) (addr + 4));
	err |= put_user(l->l_len, (long *) (addr + 12));
	err |= put_user(l->l_pid, (int *) (addr + 20));
	return err;
}
/*
 * Copy a packed ia32 struct flock in from user space, widening the
 * 32-bit lo/hi halves of l_start and l_len into the native 64-bit
 * fields.  Returns non-zero if any user access faulted.
 */
static inline long
ia32_get_flock (struct flock *l, unsigned long addr)
{
	unsigned int start_lo, start_hi, len_lo, len_hi;
	int err = 0;

	err |= get_user(l->l_type, (short *) addr);
	err |= get_user(l->l_whence, (short *) (addr + 2));
	err |= get_user(start_lo, (int *) (addr + 4));
	err |= get_user(start_hi, (int *) (addr + 8));
	err |= get_user(len_lo, (int *) (addr + 12));
	err |= get_user(len_hi, (int *) (addr + 16));
	err |= get_user(l->l_pid, (int *) (addr + 20));

	l->l_start = ((unsigned long) start_hi << 32) | start_lo;
	l->l_len = ((unsigned long) len_hi << 32) | len_lo;
	return err;
}
/*
 * ia32 entry point for fcntl64.  The 64-bit lock commands carry an
 * ia32-layout flock64; convert to a native flock, call the native
 * sys_fcntl on a kernel copy, and copy the result back for F_GETLK64.
 * Everything else is delegated to sys32_fcntl().
 */
asmlinkage long
sys32_fcntl64 (unsigned int fd, unsigned int cmd, unsigned int arg)
{
	mm_segment_t old_fs;
	struct flock f;
	long ret;

	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
		if (ia32_get_flock(&f, arg))
			return -EFAULT;
		old_fs = get_fs();
		set_fs(KERNEL_DS);	/* let sys_fcntl accept a kernel pointer */
		ret = sys_fcntl(fd, cmd, (unsigned long) &f);
		set_fs(old_fs);
		/*
		 * Bug fix: the copy-back test used to compare against
		 * F_GETLK, which can never match inside these F_*64 cases,
		 * so F_GETLK64 results were never returned to user space.
		 */
		if (cmd == F_GETLK64 && ia32_put_flock(&f, arg))
			return -EFAULT;
		break;
	default:
		ret = sys32_fcntl(fd, cmd, arg);
		break;
	}
	return ret;
}
asmlinkage long
sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
{
......
......@@ -4,16 +4,15 @@
extra-y := head.o init_task.o
obj-y := acpi.o entry.o gate.o efi.o efi_stub.o ia64_ksyms.o \
irq.o irq_ia64.o irq_lsapic.o ivt.o \
machvec.o pal.o process.o perfmon.o ptrace.o sal.o \
semaphore.o setup.o \
signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o irq_lsapic.o \
ivt.o machvec.o pal.o perfmon.o process.o ptrace.o sal.o semaphore.o setup.o signal.o \
sys_ia64.o time.o traps.o unaligned.o unwind.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_FSYS) += fsys.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
/*
* arch/ia64/kernel/acpi-ext.c
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
*
* Vendor specific extensions to ACPI. These are used by both
* HP and NEC.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <asm/acpi-ext.h>
/*
* Note: Strictly speaking, this is only needed for HP and NEC machines.
* However, NEC machines identify themselves as DIG-compliant, so there is
* no easy way to #ifdef this out.
*/
/*
 * Extract the CSR base address and length for a device from the HP
 * vendor-specific resource in its _CRS buffer.  On success *csr_base
 * and *csr_length hold the decoded values; on failure they are zero.
 */
acpi_status
hp_acpi_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
	struct acpi_hp_vendor_long *hp_res;
	struct acpi_resource_vendor *res;
	struct acpi_buffer buf;
	efi_guid_t vendor_guid;
	acpi_status status;
	int byte, offset = 0;

	*csr_base = 0;
	*csr_length = 0;

	status = acpi_get_crs(obj, &buf);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
		return status;
	}

	res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
	if (!res) {
		printk(KERN_ERR PREFIX "Failed to find config space for device\n");
		status = AE_NOT_FOUND;
		goto out;
	}

	hp_res = (struct acpi_hp_vendor_long *)(res->reserved);
	if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
		printk(KERN_ERR PREFIX "Unknown Vendor data\n");
		status = AE_TYPE;	/* Revisit error? */
		goto out;
	}

	memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
	if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
		printk(KERN_ERR PREFIX "Vendor GUID does not match\n");
		status = AE_TYPE;	/* Revisit error? */
		goto out;
	}

	/* Base and length are stored LSB-first, one byte at a time. */
	for (byte = 0; byte < 8; byte++) {
		*csr_base |= ((u64)(hp_res->csr_base[byte]) << (byte * 8));
		*csr_length |= ((u64)(hp_res->csr_length[byte]) << (byte * 8));
	}
	status = AE_OK;
out:
	acpi_dispose_crs(&buf);
	return status;
}
......@@ -9,7 +9,7 @@
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
* Copyright (C) 2001 Takayoshi Kochi <t-kouchi@cq.jp.nec.com>
* Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
* Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
......@@ -109,8 +109,6 @@ acpi_get_sysname (void)
return "sn2";
# elif defined (CONFIG_IA64_DIG)
return "dig";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
# else
# error Unknown platform. Fix acpi.c.
# endif
......@@ -176,6 +174,73 @@ acpi_dispose_crs (struct acpi_buffer *buf)
kfree(buf->pointer);
}
/*
 * Scan a _CRS buffer for the first 16/32/64-bit address resource whose
 * resource_type matches 'type', and report its base address, length,
 * and translation offset.  The outputs are left untouched if no match
 * is found.
 */
void
acpi_get_crs_addr (struct acpi_buffer *buf, int type, u64 *base, u64 *size, u64 *tra)
{
	struct acpi_resource *res;
	int offset = 0;

	while ((res = acpi_get_crs_next(buf, &offset)) != NULL) {
		if (res->id == ACPI_RSTYPE_ADDRESS16) {
			struct acpi_resource_address16 *a16 =
				(struct acpi_resource_address16 *) &res->data;
			if (type == a16->resource_type) {
				*base = a16->min_address_range;
				*size = a16->address_length;
				*tra = a16->address_translation_offset;
				return;
			}
		} else if (res->id == ACPI_RSTYPE_ADDRESS32) {
			struct acpi_resource_address32 *a32 =
				(struct acpi_resource_address32 *) &res->data;
			if (type == a32->resource_type) {
				*base = a32->min_address_range;
				*size = a32->address_length;
				*tra = a32->address_translation_offset;
				return;
			}
		} else if (res->id == ACPI_RSTYPE_ADDRESS64) {
			struct acpi_resource_address64 *a64 =
				(struct acpi_resource_address64 *) &res->data;
			if (type == a64->resource_type) {
				*base = a64->min_address_range;
				*size = a64->address_length;
				*tra = a64->address_translation_offset;
				return;
			}
		}
	}
}
/*
 * Fetch the object's _CRS buffer and extract the first address-space
 * resource of the given type via acpi_get_crs_addr().  Outputs are
 * zeroed first, so they stay zero when no matching resource exists.
 * Returns the _CRS lookup status on failure, AE_OK otherwise.
 */
int
acpi_get_addr_space(void *obj, u8 type, u64 *base, u64 *length, u64 *tra)
{
	struct acpi_buffer buf;
	acpi_status status;

	*base = 0;
	*length = 0;
	*tra = 0;

	status = acpi_get_crs((acpi_handle)obj, &buf);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
		return status;
	}

	acpi_get_crs_addr(&buf, type, base, length, tra);
	acpi_dispose_crs(&buf);

	return AE_OK;
}
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_BOOT
......@@ -808,6 +873,7 @@ acpi_get_prt (struct pci_vector_struct **vectors, int *count)
list_for_each(node, &acpi_prt.entries) {
entry = (struct acpi_prt_entry *)node;
vector[i].segment = entry->id.segment;
vector[i].bus = entry->id.bus;
vector[i].pci_id = ((u32) entry->id.device << 16) | 0xffff;
vector[i].pin = entry->pin;
......
......@@ -91,7 +91,7 @@ ENTRY(ia64_execve)
END(ia64_execve)
/*
* sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 child_tidptr, u64 parent_tidptr,
* sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
* u64 tls)
*/
GLOBAL_ENTRY(sys_clone2)
......@@ -105,10 +105,10 @@ GLOBAL_ENTRY(sys_clone2)
mov out1=in1
mov out3=in2
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
mov out4=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;;
(p6) st8 [r2]=in5 // store TLS in r16 for copy_thread()
mov out5=in4 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
mov out5=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK
br.call.sptk.many rp=do_fork
......@@ -126,12 +126,12 @@ GLOBAL_ENTRY(sys_clone2)
END(sys_clone2)
/*
* sys_clone(u64 flags, u64 ustack_base, u64 user_tid, u64 tls)
* sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
* Deprecated. Use sys_clone2() instead.
*/
GLOBAL_ENTRY(sys_clone)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
alloc r16=ar.pfs,4,2,5,0
alloc r16=ar.pfs,5,2,6,0
DO_SAVE_SWITCH_STACK
adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
mov loc0=rp
......@@ -140,9 +140,10 @@ GLOBAL_ENTRY(sys_clone)
mov out1=in1
mov out3=16 // stacksize (compensates for 16-byte scratch area)
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in2 // out4 = user_tid (optional)
mov out4=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;;
(p6) st8 [r2]=in3 // store TLS in r13 (tp)
(p6) st8 [r2]=in4 // store TLS in r13 (tp)
mov out5=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK
br.call.sptk.many rp=do_fork
......
......@@ -533,15 +533,15 @@ fsyscall_table:
data8 fsys_fallback_syscall // epoll_wait // 1245
data8 fsys_fallback_syscall // restart_syscall
data8 fsys_fallback_syscall // semtimedop
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall // 1250
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall // 1255
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall // timer_create
data8 fsys_fallback_syscall // timer_settime
data8 fsys_fallback_syscall // timer_gettime // 1250
data8 fsys_fallback_syscall // timer_getoverrun
data8 fsys_fallback_syscall // timer_delete
data8 fsys_fallback_syscall // clock_settime
data8 fsys_fallback_syscall // clock_gettime
data8 fsys_fallback_syscall // clock_getres // 1255
data8 fsys_fallback_syscall // clock_nanosleep
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall
data8 fsys_fallback_syscall
......
......@@ -733,73 +733,3 @@ SET_REG(b4);
SET_REG(b5);
#endif /* CONFIG_IA64_BRL_EMU */
#ifdef CONFIG_SMP
/*
* This routine handles spinlock contention. It uses a simple exponential backoff
* algorithm to reduce unnecessary bus traffic. The initial delay is selected from
* the low-order bits of the cycle counter (a cheap "randomizer"). I'm sure this
* could use additional tuning, especially on systems with a large number of CPUs.
* Also, I think the maximum delay should be made a function of the number of CPUs in
* the system. --davidm 00/08/05
*
* WARNING: This is not a normal procedure. It gets called from C code without
* the compiler knowing about it. Thus, we must not use any scratch registers
* beyond those that were declared "clobbered" at the call-site (see spin_lock()
* macro). We may not even use the stacked registers, because that could overwrite
* output registers. Similarly, we can't use the scratch stack area as it may be
* in use, too.
*
* Inputs:
* ar.ccv = 0 (and available for use)
* r28 = available for use
* r29 = available for use
* r30 = non-zero (and available for use)
* r31 = address of lock we're trying to acquire
* p15 = available for use
*/
# define delay r28
# define timeout r29
# define tmp r30
GLOBAL_ENTRY(ia64_spinlock_contention)
mov tmp=ar.itc
;;
and delay=0x3f,tmp
;;
.retry: add timeout=tmp,delay
shl delay=delay,1
;;
dep delay=delay,r0,0,13 // limit delay to 8192 cycles
;;
// delay a little...
.wait: sub tmp=tmp,timeout
or delay=0xf,delay // make sure delay is non-zero (otherwise we get stuck with 0)
;;
cmp.lt p15,p0=tmp,r0
mov tmp=ar.itc
(p15) br.cond.sptk .wait
;;
ld4 tmp=[r31]
;;
cmp.ne p15,p0=tmp,r0
mov tmp=ar.itc
(p15) br.cond.sptk .retry // lock is still busy
;;
// try acquiring lock (we know ar.ccv is still zero!):
mov tmp=1
;;
cmpxchg4.acq tmp=[r31],tmp,ar.ccv
;;
cmp.eq p15,p0=tmp,r0
mov tmp=ar.itc
(p15) br.ret.sptk.many b7 // got lock -> return
br .retry // still no luck, retry
END(ia64_spinlock_contention)
#endif
......@@ -57,9 +57,7 @@ EXPORT_SYMBOL_NOVERS(__up);
EXPORT_SYMBOL(clear_page);
#include <asm/processor.h>
# ifndef CONFIG_NUMA
EXPORT_SYMBOL(cpu_info__per_cpu);
# endif
EXPORT_SYMBOL(kernel_thread);
#include <asm/system.h>
......@@ -147,3 +145,19 @@ EXPORT_SYMBOL(machvec_noop);
EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem);
#endif
#ifdef CONFIG_NUMA
#include <asm/numa.h>
EXPORT_SYMBOL(cpu_to_node_map);
#endif
#include <asm/unwind.h>
EXPORT_SYMBOL(unw_init_from_blocked_task);
EXPORT_SYMBOL(unw_init_running);
EXPORT_SYMBOL(unw_unwind);
EXPORT_SYMBOL(unw_unwind_to_user);
EXPORT_SYMBOL(unw_access_gr);
EXPORT_SYMBOL(unw_access_br);
EXPORT_SYMBOL(unw_access_fr);
EXPORT_SYMBOL(unw_access_ar);
EXPORT_SYMBOL(unw_access_pr);
This diff is collapsed.
This diff is collapsed.
......@@ -27,9 +27,7 @@
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#ifdef CONFIG_SMP
#include <linux/smp.h>
#endif
MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
......@@ -37,12 +35,6 @@ MODULE_LICENSE("GPL");
#define PALINFO_VERSION "0.5"
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) 1
#endif
typedef int (*palinfo_func_t)(char*);
typedef struct {
......@@ -933,7 +925,7 @@ palinfo_init(void)
*/
for (i=0; i < NR_CPUS; i++) {
if (!cpu_is_online(i)) continue;
if (!cpu_online(i)) continue;
sprintf(cpustr,CPUSTR, i);
......
......@@ -24,6 +24,7 @@
#include <linux/wrapper.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/smp.h>
#include <asm/bitops.h>
#include <asm/errno.h>
......@@ -134,12 +135,6 @@
#define PFM_CPUINFO_CLEAR(v) __get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v) __get_cpu_var(pfm_syst_info) |= (v)
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) (i==0)
#endif
/*
* debugging
*/
......@@ -1082,7 +1077,7 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
* and it must be a valid CPU
*/
cpu = ffz(~pfx->ctx_cpu_mask);
if (cpu_is_online(cpu) == 0) {
if (cpu_online(cpu) == 0) {
DBprintk(("CPU%d is not online\n", cpu));
return -EINVAL;
}
......@@ -3153,7 +3148,7 @@ pfm_proc_info(char *page)
p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val);
for(i=0; i < NR_CPUS; i++) {
if (cpu_is_online(i) == 0) continue;
if (cpu_online(i) == 0) continue;
p += sprintf(p, "CPU%-2d overflow intrs : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
p += sprintf(p, "CPU%-2d spurious intrs : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count);
p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count);
......
......@@ -66,10 +66,7 @@ do_show_stack (struct unw_frame_info *info, void *arg)
void
show_trace_task (struct task_struct *task)
{
struct unw_frame_info info;
unw_init_from_blocked_task(&info, task);
do_show_stack(&info, 0);
show_stack(task);
}
void
......@@ -169,7 +166,10 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
void
default_idle (void)
{
/* may want to do PAL_LIGHT_HALT here... */
#ifdef CONFIG_IA64_PAL_IDLE
if (!need_resched())
safe_halt();
#endif
}
void __attribute__((noreturn))
......@@ -177,6 +177,10 @@ cpu_idle (void *unused)
{
/* endless idle loop with no priority at all */
while (1) {
void (*idle)(void) = pm_idle;
if (!idle)
idle = default_idle;
#ifdef CONFIG_SMP
if (!need_resched())
min_xtp();
......@@ -186,10 +190,7 @@ cpu_idle (void *unused)
#ifdef CONFIG_IA64_SGI_SN
snidle();
#endif
if (pm_idle)
(*pm_idle)();
else
default_idle();
(*idle)();
}
#ifdef CONFIG_IA64_SGI_SN
......@@ -581,6 +582,15 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
tid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
if (parent != current) {
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(ia64_task_regs(current))) {
/* A kernel thread is always a 64-bit process. */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
}
#endif
result = (*fn)(arg);
_exit(result);
}
......@@ -751,7 +761,7 @@ dup_task_struct(struct task_struct *orig)
}
void
__put_task_struct (struct task_struct *tsk)
free_task_struct (struct task_struct *tsk)
{
free_pages((unsigned long) tsk, KERNEL_STACK_SIZE_ORDER);
}
......@@ -191,6 +191,10 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
err |= __put_user(from->si_pfm_ovfl[2], &to->si_pfm_ovfl[2]);
err |= __put_user(from->si_pfm_ovfl[3], &to->si_pfm_ovfl[3]);
}
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_value, &to->si_value);
break;
default:
err |= __put_user(from->si_uid, &to->si_uid);
......
......@@ -279,12 +279,15 @@ smp_callin (void)
smp_setup_percpu_timer();
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
/*
* Synchronize the ITC with the BP
*/
Dprintk("Going to syncup ITC with BP.\n");
ia64_sync_itc(0);
}
/*
* Get our bogomips.
*/
......
......@@ -60,7 +60,7 @@ do_profile (unsigned long ip)
}
/*
* Return the number of micro-seconds that elapsed since the last update to jiffy. The
* Return the number of nano-seconds that elapsed since the last update to jiffy. The
* xtime_lock must be at least read-locked when calling this routine.
*/
static inline unsigned long
......@@ -86,6 +86,9 @@ gettimeoffset (void)
void
do_settimeofday (struct timeval *tv)
{
time_t sec = tv->tv_sec;
long nsec = tv->tv_usec * 1000;
write_seqlock_irq(&xtime_lock);
{
/*
......@@ -94,22 +97,22 @@ do_settimeofday (struct timeval *tv)
* Discover what correction gettimeofday would have done, and then undo
* it!
*/
tv->tv_usec -= gettimeoffset();
tv->tv_usec -= (jiffies - wall_jiffies) * (1000000 / HZ);
nsec -= gettimeoffset();
while (tv->tv_usec < 0) {
tv->tv_usec += 1000000;
tv->tv_sec--;
while (nsec < 0) {
nsec += 1000000000;
sec--;
}
xtime.tv_sec = tv->tv_sec;
xtime.tv_nsec = 1000 * tv->tv_usec;
xtime.tv_sec = sec;
xtime.tv_nsec = nsec;
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
}
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
void
......
......@@ -338,8 +338,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
fpu_swa_count = 0;
if ((++fpu_swa_count < 5) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
last_time = jiffies;
printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri);
printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
}
exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
......
......@@ -253,6 +253,11 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
struct pt_regs *pt;
if ((unsigned) regnum - 1 >= 127) {
if (regnum == 0 && !write) {
*val = 0; /* read r0 always returns 0 */
*nat = 0;
return 0;
}
UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
__FUNCTION__, regnum);
return -1;
......@@ -318,13 +323,8 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
}
} else {
/* access a scratch register */
if (!info->pt) {
UNW_DPRINT(0, "unwind.%s: no pt-regs; cannot access r%d\n",
__FUNCTION__, regnum);
return -1;
}
pt = get_scratch_regs(info);
addr = (unsigned long *) (pt + pt_regs_off(regnum));
addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
if (info->pri_unat_loc)
nat_addr = info->pri_unat_loc;
else
......
......@@ -87,12 +87,31 @@ ia64_outl (unsigned int val, unsigned long port)
__ia64_outl(val, port);
}
/*
 * Out-of-line MMIO read accessors; each forwards to the corresponding
 * inline __ia64_* implementation (aliased below for the generic kernel).
 *
 * Repairs to the diff-mangled original:
 *  - dropped the bodiless "void ia64_mmiob (void)" header and the stray
 *    "__ia64_mmiob();" line inside ia64_readw — both residue of the old
 *    (removed) ia64_mmiob function interleaved by the diff;
 *  - added the missing ';' on ia64_readq's return statement.
 */
unsigned char
ia64_readb (void *addr)
{
	return __ia64_readb (addr);
}

unsigned short
ia64_readw (void *addr)
{
	return __ia64_readw (addr);
}

unsigned int
ia64_readl (void *addr)
{
	return __ia64_readl (addr);
}

unsigned long
ia64_readq (void *addr)
{
	return __ia64_readq (addr);
}
/* define aliases: */
asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
......@@ -105,7 +124,11 @@ asm ("__ia64_outb = ia64_outb");
asm ("__ia64_outw = ia64_outw");
asm ("__ia64_outl = ia64_outl");
asm (".global __ia64_mmiob");
asm ("__ia64_mmiob = ia64_mmiob");
asm (".global __ia64_readb, __ia64_readw, __ia64_readl, __ia64_readq");
asm ("__ia64_readb = ia64_readb");
asm ("__ia64_readw = ia64_readw");
asm ("__ia64_readl = ia64_readl");
asm ("__ia64_readq = ia64_readq");
#endif /* CONFIG_IA64_GENERIC */
......@@ -473,12 +473,6 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
}
unsigned long
swiotlb_dma_address (struct scatterlist *sg)
{
return sg->dma_address;
}
/*
* Return whether the given PCI device DMA address mask can be supported properly. For
* example, if your device can only drive the low 24-bits during PCI bus mastering, then
......@@ -497,7 +491,6 @@ EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single);
EXPORT_SYMBOL(swiotlb_sync_sg);
EXPORT_SYMBOL(swiotlb_dma_address);
EXPORT_SYMBOL(swiotlb_alloc_consistent);
EXPORT_SYMBOL(swiotlb_free_consistent);
EXPORT_SYMBOL(swiotlb_pci_dma_supported);
......@@ -194,6 +194,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
up_read(&mm->mmap_sem);
if (current->pid == 1) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk(KERN_CRIT "VM: killing process %s\n", current->comm);
......
/* Linker script for kernel modules: merge per-object unwind data and
   reserve the linkage sections the module loader fills in. */
SECTIONS {
/* Group unwind sections into a single section: */
.IA_64.unwind_info : { *(.IA_64.unwind_info*) }
.IA_64.unwind : { *(.IA_64.unwind*) }
/*
* Create place-holder sections to hold the PLTs, GOT, and
* official procedure-descriptors (.opd).
*/
/* BYTE(0) emits a single byte so each section is materialized in the
   object — presumably sized properly at module load time; TODO confirm
   against the module loader. */
.core.plt : { BYTE(0) }
.init.plt : { BYTE(0) }
.got : { BYTE(0) }
.opd : { BYTE(0) }
}
......@@ -49,11 +49,13 @@ struct pci_fixup pcibios_fixups[1];
/*
* Low-level SAL-based PCI configuration access functions. Note that SAL
* calls are already serialized (via sal_lock), so we don't need another
* synchronization mechanism here. Not using segment number (yet).
* synchronization mechanism here.
*/
#define PCI_SAL_ADDRESS(bus, dev, fn, reg) \
((u64)(bus << 16) | (u64)(dev << 11) | (u64)(fn << 8) | (u64)(reg))
#define PCI_SAL_ADDRESS(seg, bus, dev, fn, reg) \
((u64)(seg << 24) | (u64)(bus << 16) | \
(u64)(dev << 11) | (u64)(fn << 8) | (u64)(reg))
static int
__pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
......@@ -61,10 +63,10 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
int result = 0;
u64 data = 0;
if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
if (!value || (seg > 255) || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL;
result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, &data);
result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(seg, bus, dev, fn, reg), len, &data);
*value = (u32) data;
......@@ -74,24 +76,24 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
static int
__pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
{
if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
if ((seg > 255) || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL;
return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, value);
return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(seg, bus, dev, fn, reg), len, value);
}
static int
pci_sal_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
return __pci_sal_read(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
return __pci_sal_read(PCI_SEGMENT(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
where, size, value);
}
static int
pci_sal_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
return __pci_sal_write(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
return __pci_sal_write(PCI_SEGMENT(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
where, size, value);
}
......@@ -114,24 +116,91 @@ pci_acpi_init (void)
subsys_initcall(pci_acpi_init);
/*
 * Shift a resource window by @offset, translating bus-local addresses
 * into the addresses the CPU uses to reach them (see the mem_offset
 * obtained from ACPI in pcibios_scan_root()).
 */
static void __init
pcibios_fixup_resource(struct resource *res, u64 offset)
{
	res->start += offset;
	res->end += offset;
}
/*
 * Apply the controller's memory-space offset to every MMIO resource of
 * @dev.  Resources with a zero start are treated as unset and skipped;
 * I/O-port resources are left untouched.
 */
void __init
pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
{
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!dev->resource[i].start)
			continue;
		if (dev->resource[i].flags & IORESOURCE_MEM)
			pcibios_fixup_resource(&dev->resource[i],
					       PCI_CONTROLLER(dev)->mem_offset);
	}
}
/* Called by ACPI when it finds a new root bus. */
/*
 * Allocate and zero a pci_controller for PCI segment @seg.
 * Returns NULL on allocation failure; the caller owns the memory.
 */
static struct pci_controller *
alloc_pci_controller(int seg)
{
	struct pci_controller *controller;

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	memset(controller, 0, sizeof(*controller));
	controller->segment = seg;
	return controller;
}
struct pci_bus *
pcibios_scan_root (int bus)
scan_root_bus(int bus, struct pci_ops *ops, void *sysdata)
{
struct list_head *list;
struct pci_bus *pci_bus;
list_for_each(list, &pci_root_buses) {
pci_bus = pci_bus_b(list);
if (pci_bus->number == bus) {
/* Already scanned */
printk("PCI: Bus (%02x) already probed\n", bus);
return pci_bus;
}
}
struct pci_bus *b;
printk("PCI: Probing PCI hardware on bus (%02x)\n", bus);
return pci_scan_bus(bus, pci_root_ops, NULL);
/*
* We know this is a new root bus we haven't seen before, so
* scan it, even if we've seen the same bus number in a different
* segment.
*/
b = kmalloc(sizeof(*b), GFP_KERNEL);
if (!b)
return NULL;
memset(b, 0, sizeof(*b));
INIT_LIST_HEAD(&b->children);
INIT_LIST_HEAD(&b->devices);
list_add_tail(&b->node, &pci_root_buses);
b->number = b->secondary = bus;
b->resource[0] = &ioport_resource;
b->resource[1] = &iomem_resource;
b->sysdata = sysdata;
b->ops = ops;
b->subordinate = pci_do_scan_bus(b);
return b;
}
/*
 * Called by ACPI when it finds a new root bus.  Builds a pci_controller
 * for segment @seg (recording the ACPI @handle and the MMIO address
 * offset of the bridge's memory window) and scans bus @bus beneath it.
 *
 * Returns the scanned pci_bus, or NULL on allocation failure.
 *
 * NOTE(review): the result of acpi_get_addr_space() is not checked, so
 * 'offset' may be used uninitialized on failure — confirm.  Also, the
 * controller is not freed if scan_root_bus() fails.
 */
struct pci_bus *
pcibios_scan_root(void *handle, int seg, int bus)
{
	struct pci_controller *controller;
	u64 base, size, offset;

	printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus);

	controller = alloc_pci_controller(seg);
	if (!controller)
		return NULL;

	controller->acpi_handle = handle;

	acpi_get_addr_space(handle, ACPI_MEMORY_RANGE, &base, &size, &offset);
	controller->mem_offset = offset;

	return scan_root_bus(bus, pci_root_ops, controller);
}
/*
......@@ -140,6 +209,11 @@ pcibios_scan_root (int bus)
/*
 * Walk every device hanging off bus @b and apply the platform resource
 * fixups (address-offset translation) to each one.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
	struct list_head *node = b->devices.next;

	while (node != &b->devices) {
		pcibios_fixup_device_resources(pci_dev_b(node), b);
		node = node->next;
	}
}
......
#!/bin/sh
dir=$(dirname $0)
CC=$1
OBJDUMP=$2
$CC -c $dir/check-gas-asm.S
res=$(objdump -r --section .data check-gas-asm.o | fgrep 00004 | tr -s ' ' |cut -f3 -d' ')
res=$($OBJDUMP -r --section .data check-gas-asm.o | fgrep 00004 | tr -s ' ' |cut -f3 -d' ')
if [ $res != ".text" ]; then
echo buggy
else
......
# arch/ia64/sn/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2003 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn ia64 subplatform
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += kernel/ # io/
# arch/ia64/sn/fakeprom/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (c) 2000-2001 Silicon Graphics, Inc. All rights reserved.
# Copyright (c) 2000-2003 Silicon Graphics, Inc. All rights reserved.
#
# Medusa fake PROM support
#
EXTRA_TARGETS := fpromasm.o main.o fw-emu.o fpmem.o klgraph_init.o \
fprom vmlinux.sym
OBJS := $(obj)/fpromasm.o $(obj)/main.o $(obj)/fw-emu.o $(obj)/fpmem.o \
$(obj)/klgraph_init.o
LDFLAGS_fprom = -static -T
obj-y=fpromasm.o main.o fw-emu.o fpmem.o klgraph_init.o
.PHONY: fprom
fprom: $(OBJ)
$(LD) -static -Tfprom.lds -o fprom $(OBJ) $(LIB)
fprom: $(obj)/fprom
.S.o:
$(CC) -D__ASSEMBLY__ $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $<
.c.o:
$(CC) $(CFLAGS) $(CFLAGS_KERNEL) -c -o $*.o $<
$(obj)/fprom: $(src)/fprom.lds $(OBJS) arch/ia64/lib/lib.a FORCE
$(call if_changed,ld)
clean:
rm -f *.o fprom
$(obj)/vmlinux.sym: $(src)/make_textsym System.map
$(src)/make_textsym vmlinux > vmlinux.sym
$(call cmd,cptotop)
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
This directory contains the files required to build
the fake PROM image that is currently being used to
boot IA64 kernels running under the SGI Medusa kernel.
......
......@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
......@@ -168,13 +168,13 @@ GetBankSize(int index, node_memmap_t nmemmap)
#endif
void
build_mem_desc(efi_memory_desc_t *md, int type, long paddr, long numbytes)
build_mem_desc(efi_memory_desc_t *md, int type, long paddr, long numbytes, long attr)
{
md->type = type;
md->phys_addr = paddr;
md->virt_addr = 0;
md->num_pages = numbytes >> 12;
md->attribute = EFI_MEMORY_WB;
md->attribute = attr;
}
int
......@@ -236,28 +236,40 @@ build_efi_memmap(void *md, int mdsize)
*/
if (bank == 0) {
if (cnode == 0) {
hole = 2*1024*1024;
build_mem_desc(md, EFI_PAL_CODE, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
hole = 1*1024*1024;
build_mem_desc(md, EFI_PAL_CODE, paddr, hole);
build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, hole, EFI_MEMORY_UC);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
hole = 3*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole);
hole = 1*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
} else {
hole = PROMRESERVED_SIZE;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole);
hole = 2*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
hole = 2*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_UC);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
}
}
build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, numbytes);
build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, numbytes, EFI_MEMORY_WB|EFI_MEMORY_WB);
md += mdsize ;
count++ ;
......
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
......
# arch/ia64/sn/io/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
......@@ -5,7 +6,8 @@
#
# Copyright (C) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn kernel routines.
# Makefile for the sn io routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
......@@ -19,4 +21,6 @@ obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \
alenlist.o pci.o pci_dma.o ate_utils.o \
ifconfig_net.o io.o ioconfig_bus.o
obj-$(CONFIG_IA64_SGI_SN2) += sn2/
obj-$(CONFIG_PCIBA) += pciba.o
# arch/ia64/sn/io/sn2/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
......@@ -6,14 +7,14 @@
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 specific io routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += bte_error.o geo_op.o klconflib.o klgraph.o l1.o \
obj-y += pcibr/ bte_error.o geo_op.o klconflib.o klgraph.o l1.o \
l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \
pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \
shub_intr.o shubio.o xbow.o xtalk.o
obj-$(CONFIG_KDB) += kdba_io.o
obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o
......@@ -154,7 +154,7 @@ int iobrick_module_get(nasid_t nasid)
return ret;
}
#ifdef CONFIG_PCI
/*
* iobrick_module_get_nasid() returns a module_id which has the brick
* type encoded in bits 15-12, but this is not the true brick type...
......@@ -185,7 +185,7 @@ iobrick_type_get_nasid(nasid_t nasid)
return -1; /* unknown brick */
}
#endif
int iobrick_module_get_nasid(nasid_t nasid)
{
int io_moduleid;
......
# arch/ia64/sn/io/sn2/pcibr/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
......@@ -6,6 +7,7 @@
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 specific pci bridge routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
......
# arch/ia64/sn/kernel/Makefile
#
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All Rights Reserved.
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
# Mountain View, CA 94043, or:
#
# http://www.sgi.com
#
# For further information regarding this notice, see:
#
# http://oss.sgi.com/projects/GenInfo/NoticeExplan
# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
.S.s:
$(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -o $*.s $<
.S.o:
$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $<
obj-y := probe.o setup.o sn_asm.o sv.o bte.o iomv.o \
irq.o mca.o
obj-y = probe.o setup.o sn_asm.o sv.o bte.o iomv.o
obj-$(CONFIG_IA64_SGI_SN1) += irq.o mca.o
obj-$(CONFIG_IA64_SGI_SN2) += irq.o mca.o
obj-$(CONFIG_IA64_SGI_SN2) += sn2/
obj-$(CONFIG_IA64_SGI_AUTOTEST) += llsc4.o misctest.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_MODULES) += sn_ksyms.o
......
......@@ -237,7 +237,7 @@ sn_setup(char **cmdline_p)
"%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
panic("PROM version too old\n");
}
#ifdef CONFIG_PCI
#ifdef CONFIG_IA64_SGI_SN2
{
extern void io_sh_swapper(int, int);
......@@ -253,7 +253,7 @@ sn_setup(char **cmdline_p)
(void)get_master_baseio_nasid();
}
#endif
#endif /* CONFIG_PCI */
status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift);
if (status != 0 || ticks_per_sec < 100000) {
printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n");
......@@ -349,7 +349,7 @@ sn_init_pdas(char **cmdline_p)
for (cnode=0; cnode < numnodes; cnode++)
memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, sizeof(nodepdaindr));
#ifdef CONFIG_PCI
/*
* Set up IO related platform-dependent nodepda fields.
* The following routine actually sets up the hubinfo struct
......@@ -359,6 +359,7 @@ sn_init_pdas(char **cmdline_p)
init_platform_nodepda(nodepdaindr[cnode], cnode);
bte_init_node (nodepdaindr[cnode], cnode);
}
#endif
}
/**
......
#
# arch/ia64/sn/kernel/sn2/Makefile
#
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
# Mountain View, CA 94043, or:
#
# http://www.sgi.com
#
# For further information regarding this notice, see:
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
#
# http://oss.sgi.com/projects/GenInfo/NoticeExplan
# sn2 specific kernel files
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o \
sn_proc_fs.o
obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
*
* The generic kernel requires function pointers to these routines, so
* we wrap the inlines from asm/ia64/sn/sn2/io.h here.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <asm/sn/sn2/io.h>
#ifdef CONFIG_IA64_GENERIC
/*
 * Out-of-line port-I/O wrappers.  Each simply forwards to the
 * corresponding __sn_* inline from <asm/sn/sn2/io.h>; the generic
 * kernel needs real function addresses for its machine vector (see the
 * file-header comment above).
 */
unsigned int
sn_inb (unsigned long port)
{
	return __sn_inb(port);
}

unsigned int
sn_inw (unsigned long port)
{
	return __sn_inw(port);
}

unsigned int
sn_inl (unsigned long port)
{
	return __sn_inl(port);
}

void
sn_outb (unsigned char val, unsigned long port)
{
	__sn_outb(val, port);
}

void
sn_outw (unsigned short val, unsigned long port)
{
	__sn_outw(val, port);
}

void
sn_outl (unsigned int val, unsigned long port)
{
	__sn_outl(val, port);
}
/*
 * Out-of-line MMIO read wrappers (8/16/32-bit), again forwarding to the
 * __sn_* inlines so function pointers to them can be taken.
 */
unsigned char
sn_readb (void *addr)
{
	return __sn_readb (addr);
}

unsigned short
sn_readw (void *addr)
{
	return __sn_readw (addr);
}

unsigned int
sn_readl (void *addr)
{
	return __sn_readl (addr);
}
/*
 * Out-of-line 64-bit MMIO read wrapper, forwarding to the __sn_readq()
 * inline.  (The return statement previously lacked its terminating
 * semicolon, which was a compile error.)
 */
unsigned long
sn_readq (void *addr)
{
	return __sn_readq (addr);
}
/*
 * Define aliases: make the global symbols __sn_* resolve to the
 * out-of-line wrappers above, so code linking against the __sn_* names
 * reaches these functions.
 * NOTE(review): presumably the __sn_* inlines from io.h never generate
 * out-of-line copies in this translation unit, otherwise these aliases
 * would clash — confirm against the header.
 */
asm (".global __sn_inb, __sn_inw, __sn_inl");
asm ("__sn_inb = sn_inb");
asm ("__sn_inw = sn_inw");
asm ("__sn_inl = sn_inl");
asm (".global __sn_outb, __sn_outw, __sn_outl");
asm ("__sn_outb = sn_outb");
asm ("__sn_outw = sn_outw");
asm ("__sn_outl = sn_outl");
asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
asm ("__sn_readb = sn_readb");
asm ("__sn_readw = sn_readw");
asm ("__sn_readl = sn_readl");
asm ("__sn_readq = sn_readq");
#endif /* CONFIG_IA64_GENERIC */
......@@ -3,10 +3,11 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/sn/simulator.h>
......@@ -46,8 +47,10 @@ sn_io_addr(unsigned long port)
}
}
EXPORT_SYMBOL(sn_io_addr);
/**
* sn2_mmiob - I/O space memory barrier
* sn_mmiob - I/O space memory barrier
*
* Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after
......@@ -60,9 +63,9 @@ sn_io_addr(unsigned long port)
*
*/
void
sn2_mmiob (void)
sn_mmiob (void)
{
while ((((volatile unsigned long) (*pda->pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
while ((((volatile unsigned long) (*pda.pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
udelay(1);
}
/*
* ia64/platform/hp/common/hp_acpi.h
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
*
 * Vendor specific extensions to ACPI. The HP-specific extensions are also used by NEC.
*/
#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H
#include <linux/types.h>
#define HP_CCSR_LENGTH 0x21
#define HP_CCSR_TYPE 0x2
#define HP_CCSR_GUID EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \
0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
/*
 * Layout of the HP vendor-specific ACPI resource identified by
 * HP_CCSR_GUID.  The base/length fields are raw byte arrays —
 * presumably little-endian 64-bit values decoded by
 * hp_acpi_csr_space(); confirm against that implementation.
 */
struct acpi_hp_vendor_long {
	u8 guid_id;		/* vendor GUID identifier byte */
	u8 guid[16];		/* 16-byte GUID (cf. HP_CCSR_GUID) */
	u8 csr_base[8];		/* CSR space base address, raw bytes */
	u8 csr_length[8];	/* CSR space length, raw bytes */
};
extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
extern acpi_status acpi_get_crs (acpi_handle, struct acpi_buffer *);
extern struct acpi_resource *acpi_get_crs_next (struct acpi_buffer *, int *);
extern union acpi_resource_data *acpi_get_crs_type (struct acpi_buffer *, int *, int);
extern void acpi_dispose_crs (struct acpi_buffer *);
#endif /* _ASM_IA64_ACPI_EXT_H */
......@@ -100,7 +100,9 @@ const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type);
int acpi_get_prt (struct pci_vector_struct **vectors, int *count);
int acpi_get_interrupt_model (int *type);
int acpi_register_irq (u32 gsi, u32 polarity, u32 trigger);
int acpi_irq_to_vector (u32 irq);
int acpi_get_addr_space (void *obj, u8 type, u64 *base, u64 *length,u64 *tra);
#ifdef CONFIG_ACPI_NUMA
#include <asm/numa.h>
......
......@@ -55,6 +55,13 @@ ia64_atomic_sub (int i, atomic_t *v)
return new;
}
#define atomic_add_return(i,v) \
((__builtin_constant_p(i) && \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
? ia64_fetch_and_add(i, &(v)->counter) \
: ia64_atomic_add(i, v))
/*
* Atomically add I to V and return TRUE if the resulting value is
* negative.
......@@ -62,15 +69,9 @@ ia64_atomic_sub (int i, atomic_t *v)
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
return ia64_atomic_add(i, v) < 0;
return atomic_add_return(i, v) < 0;
}
#define atomic_add_return(i,v) \
((__builtin_constant_p(i) && \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
? ia64_fetch_and_add(i, &(v)->counter) \
: ia64_atomic_add(i, v))
#define atomic_sub_return(i,v) \
((__builtin_constant_p(i) && \
......
......@@ -275,7 +275,7 @@ __test_and_change_bit (int nr, void *addr)
}
static __inline__ int
test_bit (int nr, volatile void *addr)
test_bit (int nr, const volatile void *addr)
{
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
......
......@@ -27,6 +27,11 @@ typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
......@@ -68,6 +73,22 @@ struct compat_flock {
compat_pid_t l_pid;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
struct compat_statfs {
int f_type;
int f_bsize;
......@@ -88,4 +109,20 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
 * A pointer passed in from user mode. This should not be used for syscall
 * parameters; just declare them as pointers, because the syscall entry code
 * will have appropriately converted them already.
 */
typedef u32 compat_uptr_t;
static inline void *
compat_ptr (compat_uptr_t uptr)
{
return (void *) (unsigned long) uptr;
}
#endif /* _ASM_IA64_COMPAT_H */
......@@ -78,9 +78,6 @@ struct flock {
pid_t l_pid;
};
#ifdef __KERNEL__
# define flock64 flock
#endif
#define F_LINUX_SPECIFIC_BASE 1024
#endif /* _ASM_IA64_FCNTL_H */
......@@ -2,7 +2,7 @@
#define _ASM_IA64_FPU_H
/*
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
* Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
......@@ -57,8 +57,9 @@
struct ia64_fpreg {
union {
unsigned long bits[2];
long double __dummy; /* force 16-byte alignment */
} u;
} __attribute__ ((aligned (16)));
};
# endif /* __ASSEMBLY__ */
......
......@@ -18,10 +18,6 @@
#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/* sigcontext.h */
/*
* As documented in the iBCS2 standard..
......@@ -214,8 +210,11 @@ typedef struct siginfo32 {
/* POSIX.1b timers */
struct {
unsigned int _timer1;
unsigned int _timer2;
timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof(unsigned int) - sizeof(int)];
sigval_t32 _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
......
......@@ -46,14 +46,10 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); \
else if ((i) == -4) \
IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); \
else if ((i) == -2) \
IA64_FETCHADD(_tmp, _v, -2, sizeof(*(v))); \
else if ((i) == -1) \
IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); \
else if ((i) == 1) \
IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); \
else if ((i) == 2) \
IA64_FETCHADD(_tmp, _v, 2, sizeof(*(v))); \
else if ((i) == 4) \
IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); \
else if ((i) == 8) \
......
......@@ -69,22 +69,6 @@ phys_to_virt (unsigned long address)
*/
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
/**
* __ia64_mmiob - I/O space memory barrier
*
* Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after
* all previous writes. For most ia64 platforms, this is a simple
* 'mf.a' instruction, so the address is ignored. For other platforms,
* the address may be required to ensure proper ordering of writes to I/O space
* since a 'dummy' read might be necessary to barrier the write operation.
*/
static inline void
__ia64_mmiob (void)
{
__ia64_mf_a();
}
static inline const unsigned long
__ia64_get_io_port_base (void)
{
......@@ -287,7 +271,6 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define __outb platform_outb
#define __outw platform_outw
#define __outl platform_outl
#define __mmiob platform_mmiob
#define inb(p) __inb(p)
#define inw(p) __inw(p)
......@@ -301,31 +284,35 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c)
#define mmiob() __mmiob()
/*
* The address passed to these functions are ioremap()ped already.
*
* We need these to be machine vectors since some platforms don't provide
* DMA coherence via PIO reads (PCI drivers and the spec imply that this is
* a good idea). Writes are ok though for all existing ia64 platforms (and
* hopefully it'll stay that way).
*/
static inline unsigned char
__readb (void *addr)
__ia64_readb (void *addr)
{
return *(volatile unsigned char *)addr;
}
static inline unsigned short
__readw (void *addr)
__ia64_readw (void *addr)
{
return *(volatile unsigned short *)addr;
}
static inline unsigned int
__readl (void *addr)
__ia64_readl (void *addr)
{
return *(volatile unsigned int *) addr;
}
static inline unsigned long
__readq (void *addr)
__ia64_readq (void *addr)
{
return *(volatile unsigned long *) addr;
}
......@@ -354,6 +341,11 @@ __writeq (unsigned long val, void *addr)
*(volatile unsigned long *) addr = val;
}
#define __readb platform_readb
#define __readw platform_readw
#define __readl platform_readl
#define __readq platform_readq
#define readb(a) __readb((void *)(a))
#define readw(a) __readw((void *)(a))
#define readl(a) __readl((void *)(a))
......
......@@ -43,7 +43,6 @@ typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int
typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int);
typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int);
typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int);
typedef unsigned long ia64_mv_pci_dma_address (struct scatterlist *);
typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64);
/*
......@@ -61,7 +60,10 @@ typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiob_t (void);
typedef unsigned char ia64_mv_readb_t (void *);
typedef unsigned short ia64_mv_readw_t (void *);
typedef unsigned int ia64_mv_readl_t (void *);
typedef unsigned long ia64_mv_readq_t (void *);
extern void machvec_noop (void);
......@@ -99,7 +101,6 @@ extern void machvec_noop (void);
# define platform_pci_unmap_sg ia64_mv.unmap_sg
# define platform_pci_dma_sync_single ia64_mv.sync_single
# define platform_pci_dma_sync_sg ia64_mv.sync_sg
# define platform_pci_dma_address ia64_mv.dma_address
# define platform_pci_dma_supported ia64_mv.dma_supported
# define platform_irq_desc ia64_mv.irq_desc
# define platform_irq_to_vector ia64_mv.irq_to_vector
......@@ -110,7 +111,10 @@ extern void machvec_noop (void);
# define platform_outb ia64_mv.outb
# define platform_outw ia64_mv.outw
# define platform_outl ia64_mv.outl
# define platofrm_mmiob ia64_mv.mmiob
# define platform_readb ia64_mv.readb
# define platform_readw ia64_mv.readw
# define platform_readl ia64_mv.readl
# define platform_readq ia64_mv.readq
# endif
/* __attribute__((__aligned__(16))) is required to make size of the
......@@ -138,7 +142,6 @@ struct ia64_machine_vector {
ia64_mv_pci_unmap_sg *unmap_sg;
ia64_mv_pci_dma_sync_single *sync_single;
ia64_mv_pci_dma_sync_sg *sync_sg;
ia64_mv_pci_dma_address *dma_address;
ia64_mv_pci_dma_supported *dma_supported;
ia64_mv_irq_desc *irq_desc;
ia64_mv_irq_to_vector *irq_to_vector;
......@@ -149,8 +152,11 @@ struct ia64_machine_vector {
ia64_mv_outb_t *outb;
ia64_mv_outw_t *outw;
ia64_mv_outl_t *outl;
ia64_mv_mmiob_t *mmiob;
} __attribute__((__aligned__(16)));
ia64_mv_readb_t *readb;
ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq;
};
#define MACHVEC_INIT(name) \
{ \
......@@ -173,7 +179,6 @@ struct ia64_machine_vector {
platform_pci_unmap_sg, \
platform_pci_dma_sync_single, \
platform_pci_dma_sync_sg, \
platform_pci_dma_address, \
platform_pci_dma_supported, \
platform_irq_desc, \
platform_irq_to_vector, \
......@@ -184,7 +189,10 @@ struct ia64_machine_vector {
platform_outb, \
platform_outw, \
platform_outl, \
platform_mmiob \
platform_readb, \
platform_readw, \
platform_readl, \
platform_readq, \
}
extern struct ia64_machine_vector ia64_mv;
......@@ -206,7 +214,6 @@ extern ia64_mv_pci_map_sg swiotlb_map_sg;
extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
extern ia64_mv_pci_dma_address swiotlb_dma_address;
extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
/*
......@@ -267,9 +274,6 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_pci_dma_sync_sg
# define platform_pci_dma_sync_sg swiotlb_sync_sg
#endif
#ifndef platform_pci_dma_address
# define platform_pci_dma_address swiotlb_dma_address
#endif
#ifndef platform_pci_dma_supported
# define platform_pci_dma_supported swiotlb_pci_dma_supported
#endif
......@@ -300,8 +304,17 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_outl
# define platform_outl __ia64_outl
#endif
#ifndef platform_mmiob
# define platform_mmiob __ia64_mmiob
#ifndef platform_readb
# define platform_readb __ia64_readb
#endif
#ifndef platform_readw
# define platform_readw __ia64_readw
#endif
#ifndef platform_readl
# define platform_readl __ia64_readl
#endif
#ifndef platform_readq
# define platform_readq __ia64_readq
#endif
#endif /* _ASM_IA64_MACHVEC_H */
......@@ -8,7 +8,6 @@ extern ia64_mv_pci_map_single sba_map_single;
extern ia64_mv_pci_unmap_single sba_unmap_single;
extern ia64_mv_pci_map_sg sba_map_sg;
extern ia64_mv_pci_unmap_sg sba_unmap_sg;
extern ia64_mv_pci_dma_address sba_dma_address;
extern ia64_mv_pci_dma_supported sba_dma_supported;
/*
......@@ -29,7 +28,6 @@ extern ia64_mv_pci_dma_supported sba_dma_supported;
#define platform_pci_unmap_sg sba_unmap_sg
#define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop)
#define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop)
#define platform_pci_dma_address sba_dma_address
#define platform_pci_dma_supported sba_dma_supported
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
......@@ -16,7 +16,6 @@ extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiob_t __ia64_mmiob;
#define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
......
......@@ -44,7 +44,6 @@ extern ia64_mv_inl_t sn1_inl;
extern ia64_mv_outb_t sn1_outb;
extern ia64_mv_outw_t sn1_outw;
extern ia64_mv_outl_t sn1_outl;
extern ia64_mv_mmiob_t sn_mmiob;
extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn1_pci_free_consistent;
extern ia64_mv_pci_map_single sn1_pci_map_single;
......@@ -53,7 +52,6 @@ extern ia64_mv_pci_map_sg sn1_pci_map_sg;
extern ia64_mv_pci_unmap_sg sn1_pci_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn1_pci_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg;
extern ia64_mv_pci_dma_address sn1_dma_address;
/*
* This stuff has dual use!
......@@ -74,7 +72,6 @@ extern ia64_mv_pci_dma_address sn1_dma_address;
#define platform_outb sn1_outb
#define platform_outw sn1_outw
#define platform_outl sn1_outl
#define platform_mmiob sn_mmiob
#define platform_pci_dma_init machvec_noop
#define platform_pci_alloc_consistent sn1_pci_alloc_consistent
#define platform_pci_free_consistent sn1_pci_free_consistent
......@@ -84,6 +81,5 @@ extern ia64_mv_pci_dma_address sn1_dma_address;
#define platform_pci_unmap_sg sn1_pci_unmap_sg
#define platform_pci_dma_sync_single sn1_pci_dma_sync_single
#define platform_pci_dma_sync_sg sn1_pci_dma_sync_sg
#define platform_pci_dma_address sn1_dma_address
#endif /* _ASM_IA64_MACHVEC_SN1_h */
/*
* Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
......@@ -41,13 +41,16 @@ extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_irq_desc sn_irq_desc;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_inb_t sn_inb;
extern ia64_mv_inw_t sn_inw;
extern ia64_mv_inl_t sn_inl;
extern ia64_mv_outb_t sn_outb;
extern ia64_mv_outw_t sn_outw;
extern ia64_mv_outl_t sn_outl;
extern ia64_mv_mmiob_t sn2_mmiob;
extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t __sn_inl;
extern ia64_mv_outb_t __sn_outb;
extern ia64_mv_outw_t __sn_outw;
extern ia64_mv_outl_t __sn_outl;
extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn_pci_free_consistent;
extern ia64_mv_pci_map_single sn_pci_map_single;
......@@ -56,7 +59,6 @@ extern ia64_mv_pci_map_sg sn_pci_map_sg;
extern ia64_mv_pci_unmap_sg sn_pci_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn_pci_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn_pci_dma_sync_sg;
extern ia64_mv_pci_dma_address sn_dma_address;
extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
/*
......@@ -72,13 +74,17 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_irq_init sn_irq_init
#define platform_send_ipi sn2_send_IPI
#define platform_global_tlb_purge sn2_global_tlb_purge
#define platform_inb sn_inb
#define platform_inw sn_inw
#define platform_inl sn_inl
#define platform_outb sn_outb
#define platform_outw sn_outw
#define platform_outl sn_outl
#define platform_mmiob sn2_mmiob
#define platform_pci_fixup sn_pci_fixup
#define platform_inb __sn_inb
#define platform_inw __sn_inw
#define platform_inl __sn_inl
#define platform_outb __sn_outb
#define platform_outw __sn_outw
#define platform_outl __sn_outl
#define platform_readb __sn_readb
#define platform_readw __sn_readw
#define platform_readl __sn_readl
#define platform_readq __sn_readq
#define platform_irq_desc sn_irq_desc
#define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq
......@@ -91,7 +97,6 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_pci_unmap_sg sn_pci_unmap_sg
#define platform_pci_dma_sync_single sn_pci_dma_sync_single
#define platform_pci_dma_sync_sg sn_pci_dma_sync_sg
#define platform_pci_dma_address sn_dma_address
#define platform_pci_dma_supported sn_pci_dma_supported
#endif /* _ASM_IA64_MACHVEC_SN2_H */
......@@ -24,7 +24,7 @@ enum {
IA64_MCA_FAILURE = 1
};
#define IA64_MCA_RENDEZ_TIMEOUT (100 * HZ) /* 1000 milliseconds */
#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
#define IA64_CMC_INT_DISABLE 0
#define IA64_CMC_INT_ENABLE 1
......
#ifndef _ASM_IA64_MODULE_H
#define _ASM_IA64_MODULE_H
/* Module support currently broken (due to in-kernel module loader). */
/*
* IA-64-specific support for kernel module loader.
*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
struct elf64_shdr; /* forward declration */
struct mod_arch_specific {
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
void *unw_table; /* unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
};
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#define MODULE_PROC_FAMILY "ia64"
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#define ARCH_SHF_SMALL SHF_IA_64_SHORT
#endif /* _ASM_IA64_MODULE_H */
......@@ -21,7 +21,7 @@
#define PCIBIOS_MIN_MEM 0x10000000
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
struct pci_bus * pcibios_scan_root(void *acpi_handle, int segment, int bus);
struct pci_dev;
......@@ -58,7 +58,6 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define pci_unmap_sg platform_pci_unmap_sg
#define pci_dma_sync_single platform_pci_dma_sync_single
#define pci_dma_sync_sg platform_pci_dma_sync_sg
#define sg_dma_address platform_pci_dma_address
#define pci_dma_supported platform_pci_dma_supported
/* pci_unmap_{single,page} is not a nop, thus... */
......@@ -92,11 +91,23 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define pci_controller_num(PDEV) (0)
#define sg_dma_len(sg) ((sg)->dma_length)
#define sg_dma_address(sg) ((sg)->dma_address)
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
struct pci_controller {
void *acpi_handle;
void *iommu;
int segment;
u64 mem_offset;
};
#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define PCI_SEGMENT(busdev) (PCI_CONTROLLER(busdev)->segment)
/* generic pci stuff */
#include <asm-generic/pci.h>
......
......@@ -59,6 +59,9 @@
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */
#define _PFN_MASK _PAGE_PPN_MASK
#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D)
......@@ -253,6 +256,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
/*
* Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
* access rights:
......@@ -402,12 +406,35 @@ pte_same (pte_t a, pte_t b)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
#define __swp_type(entry) (((entry).val >> 1) & 0xff)
/*
* Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
* bits in the swap-type field of the swap pte. It would be nice to
* enforce that, but we can't easily include <linux/swap.h> here.
* (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
*
* Format of swap pte:
* bit 0 : present bit (must be zero)
* bit 1 : _PAGE_FILE (must be zero)
* bits 2- 8: swap-type
* bits 9-62: swap offset
* bit 63 : _PAGE_PROTNONE bit
*
* Format of file pte:
* bit 0 : present bit (must be zero)
* bit 1 : _PAGE_FILE (must be one)
* bits 2-62: file_offset/PAGE_SIZE
* bit 63 : _PAGE_PROTNONE bit
*/
#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
#define __swp_offset(entry) (((entry).val << 1) >> 10)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define PTE_FILE_MAX_BITS 61
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
#define io_remap_page_range remap_page_range /* XXX is this right? */
/*
......
......@@ -379,7 +379,7 @@ extern unsigned long get_wchan (struct task_struct *p);
static inline unsigned long
ia64_get_kr (unsigned long regnum)
{
unsigned long r;
unsigned long r = 0;
switch (regnum) {
case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
......@@ -915,13 +915,13 @@ ia64_tpa (__u64 addr)
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE 256
extern inline void
static inline void
prefetch (const void *x)
{
__asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
}
extern inline void
static inline void
prefetchw (const void *x)
{
__asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
......
......@@ -37,13 +37,23 @@ extern spinlock_t sal_lock;
result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
# define SAL_CALL(result,args...) do { \
unsigned long flags; \
struct ia64_fpreg fr[6]; \
ia64_save_scratch_fpregs(fr); \
spin_lock_irqsave(&sal_lock, flags); \
__SAL_CALL(result,args); \
spin_unlock_irqrestore(&sal_lock, flags); \
ia64_load_scratch_fpregs(fr); \
unsigned long __ia64_sc_flags; \
struct ia64_fpreg __ia64_sc_fr[6]; \
ia64_save_scratch_fpregs(__ia64_sc_fr); \
spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \
__SAL_CALL(result, args); \
spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \
ia64_load_scratch_fpregs(__ia64_sc_fr); \
} while (0)
# define SAL_CALL_NOLOCK(result,args...) do { \
unsigned long __ia64_scn_flags; \
struct ia64_fpreg __ia64_scn_fr[6]; \
ia64_save_scratch_fpregs(__ia64_scn_fr); \
local_irq_save(__ia64_scn_flags); \
__SAL_CALL(result, args); \
local_irq_restore(__ia64_scn_flags); \
ia64_load_scratch_fpregs(__ia64_scn_fr); \
} while (0)
#define SAL_SET_VECTORS 0x01000000
......@@ -686,13 +696,14 @@ ia64_sal_get_state_info_size (u64 sal_info_type)
/*
* Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
* the monarch processor.
* the monarch processor. Must not lock, because it will not return on any cpu until the
* monarch processor sends a wake up.
*/
static inline s64
ia64_sal_mc_rendez (void)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
return isrv.status;
}
......
......@@ -3,7 +3,7 @@
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co
* Copyright (C) 2001-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef _ASM_IA64_SMP_H
......@@ -74,7 +74,7 @@ cpu_logical_id (int cpuid)
int i;
for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(i) == (__u32) cpuid)
if (cpu_physical_id(i) == cpuid)
break;
return i;
}
......
......@@ -3,8 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Ralf Baechle
* Copyright (C) 2000-2001 Silicon Graphics, Inc.
*/
#ifndef _ASM_IA64_SN_IO_H
#define _ASM_IA64_SN_IO_H
......@@ -78,4 +78,9 @@
#include <asm/sn/sn2/shubio.h>
#endif
/*
* Used to ensure write ordering (like mb(), but for I/O space)
*/
extern void sn_mmiob(void);
#endif /* _ASM_IA64_SN_IO_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_SN2_IO_H
#define _ASM_SN_SN2_IO_H
extern void * sn_io_addr(unsigned long port); /* Forward definition */
extern void sn_mmiob(void); /* Forward definition */
#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
extern void sn_dma_flush(unsigned long);
/*
* The following routines are SN Platform specific, called when
* a reference is made to inX/outX set macros. SN Platform
* inX set of macros ensures that Posted DMA writes on the
* Bridge is flushed.
*
* The routines should be self explainatory.
*/
static inline unsigned int
__sn_inb (unsigned long port)
{
volatile unsigned char *addr = sn_io_addr(port);
unsigned char ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline unsigned int
__sn_inw (unsigned long port)
{
volatile unsigned short *addr = sn_io_addr(port);
unsigned short ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline unsigned int
__sn_inl (unsigned long port)
{
volatile unsigned int *addr = sn_io_addr(port);
unsigned int ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline void
__sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
static inline void
__sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
static inline void
__sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
/*
* The following routines are SN Platform specific, called when
* a reference is made to readX/writeX set macros. SN Platform
* readX set of macros ensures that Posted DMA writes on the
* Bridge is flushed.
*
* The routines should be self explainatory.
*/
static inline unsigned char
__sn_readb (void *addr)
{
unsigned char val;
val = *(volatile unsigned char *)addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned short
__sn_readw (void *addr)
{
unsigned short val;
val = *(volatile unsigned short *)addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned int
__sn_readl (void *addr)
{
unsigned int val;
val = *(volatile unsigned int *) addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned long
__sn_readq (void *addr)
{
unsigned long val;
val = *(volatile unsigned long *) addr;
sn_dma_flush((unsigned long)addr);
return val;
}
/*
* For generic and SN2 kernels, we have a set of fast access
* PIO macros. These macros are provided on SN Platform
* because the normal inX and readX macros perform an
* additional task of flushing Post DMA request on the Bridge.
*
* These routines should be self explainatory.
*/
static inline unsigned int
sn_inb_fast (unsigned long port)
{
volatile unsigned char *addr = (unsigned char *)port;
unsigned char ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inw_fast (unsigned long port)
{
volatile unsigned short *addr = (unsigned short *)port;
unsigned short ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inl_fast (unsigned long port)
{
volatile unsigned int *addr = (unsigned int *)port;
unsigned int ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned char
sn_readb_fast (void *addr)
{
return *(volatile unsigned char *)addr;
}
static inline unsigned short
sn_readw_fast (void *addr)
{
return *(volatile unsigned short *)addr;
}
static inline unsigned int
sn_readl_fast (void *addr)
{
return *(volatile unsigned int *) addr;
}
static inline unsigned long
sn_readq_fast (void *addr)
{
return *(volatile unsigned long *) addr;
}
#endif
......@@ -2,7 +2,7 @@
#define _ASM_IA64_SPINLOCK_H
/*
* Copyright (C) 1998-2002 Hewlett-Packard Co
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
*
......@@ -15,58 +15,6 @@
#include <asm/bitops.h>
#include <asm/atomic.h>
#undef NEW_LOCK
#ifdef NEW_LOCK
typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) ((x)->lock = 0)
/*
* Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
* rather than a simple xchg to avoid writing the cache-line when
* there is contention.
*/
#define _raw_spin_lock(x) \
{ \
register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
\
__asm__ __volatile__ ( \
"mov r30=1\n" \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq r30=[%0],r30,ar.ccv\n" \
";;\n" \
"cmp.ne p15,p0=r30,r0\n" \
"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
";;\n" \
"1:\n" /* force a new bundle */ \
:: "r"(addr) \
: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory"); \
}
#define _raw_spin_trylock(x) \
({ \
register long result; \
\
__asm__ __volatile__ ( \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq %0=[%2],%1,ar.ccv\n" \
: "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory"); \
(result == 0); \
})
#define spin_is_locked(x) ((x)->lock != 0)
#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#else /* !NEW_LOCK */
typedef struct {
volatile unsigned int lock;
} spinlock_t;
......@@ -123,8 +71,6 @@ do { \
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#endif /* !NEW_LOCK */
typedef struct {
volatile int read_counter:31;
volatile int write_lock:1;
......@@ -136,7 +82,7 @@ typedef struct {
#define _raw_read_lock(rw) \
do { \
int tmp = 0; \
int __read_lock_tmp = 0; \
__asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n" \
";;\n" \
"tbit.nz p6,p0 = %0, 31\n" \
......@@ -151,15 +97,15 @@ do { \
"br.cond.sptk.few 1b\n" \
";;\n" \
".previous\n" \
: "=&r" (tmp) \
: "=&r" (__read_lock_tmp) \
: "r" (rw) : "p6", "memory"); \
} while(0)
#define _raw_read_unlock(rw) \
do { \
int tmp = 0; \
int __read_unlock_tmp = 0; \
__asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n" \
: "=r" (tmp) \
: "=r" (__read_unlock_tmp) \
: "r" (rw) \
: "memory"); \
} while(0)
......
......@@ -31,6 +31,7 @@
#include <linux/types.h>
struct pci_vector_struct {
__u16 segment; /* PCI Segment number */
__u16 bus; /* PCI Bus number */
__u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
__u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
......@@ -108,7 +109,7 @@ ia64_insn_group_barrier (void)
#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
#define safe_halt() ia64_pal_halt(1) /* PAL_HALT */
#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
/*
* The group barrier in front of the rsm & ssm are necessary to ensure
......
......@@ -7,8 +7,8 @@
* The main single-value unaligned transfer routines. Derived from
* the Linux/Alpha version.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998, 1999, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define get_unaligned(ptr) \
((__typeof__(*(ptr)))ia64_get_unaligned((ptr), sizeof(*(ptr))))
......@@ -16,106 +16,101 @@
#define put_unaligned(x,ptr) \
ia64_put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
/*
* EGCS 1.1 knows about arbitrary unaligned loads. Define some
* packed structures to talk about such things with.
*/
struct __una_u64 { __u64 x __attribute__((packed)); };
struct __una_u32 { __u32 x __attribute__((packed)); };
struct __una_u16 { __u16 x __attribute__((packed)); };
static inline unsigned long
__uldq (const unsigned long * r11)
__uld8 (const unsigned long * addr)
{
const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
return ptr->x;
}
static inline unsigned long
__uldl (const unsigned int * r11)
__uld4 (const unsigned int * addr)
{
const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
return ptr->x;
}
static inline unsigned long
__uldw (const unsigned short * r11)
__uld2 (const unsigned short * addr)
{
const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
return ptr->x;
}
static inline void
__ustq (unsigned long r5, unsigned long * r11)
__ust8 (unsigned long val, unsigned long * addr)
{
struct __una_u64 *ptr = (struct __una_u64 *) r11;
ptr->x = r5;
struct __una_u64 *ptr = (struct __una_u64 *) addr;
ptr->x = val;
}
static inline void
__ustl (unsigned long r5, unsigned int * r11)
__ust4 (unsigned long val, unsigned int * addr)
{
struct __una_u32 *ptr = (struct __una_u32 *) r11;
ptr->x = r5;
struct __una_u32 *ptr = (struct __una_u32 *) addr;
ptr->x = val;
}
static inline void
__ustw (unsigned long r5, unsigned short * r11)
__ust2 (unsigned long val, unsigned short * addr)
{
struct __una_u16 *ptr = (struct __una_u16 *) r11;
ptr->x = r5;
struct __una_u16 *ptr = (struct __una_u16 *) addr;
ptr->x = val;
}
/*
* This function doesn't actually exist. The idea is that when
* someone uses the macros below with an unsupported size (datatype),
* the linker will alert us to the problem via an unresolved reference
* error.
* This function doesn't actually exist. The idea is that when someone uses the macros
* below with an unsupported size (datatype), the linker will alert us to the problem via
* an unresolved reference error.
*/
extern unsigned long ia64_bad_unaligned_access_length (void);
#define ia64_get_unaligned(_ptr,size) \
({ \
const void *ptr = (_ptr); \
unsigned long val; \
const void *__ia64_ptr = (_ptr); \
unsigned long __ia64_val; \
\
switch (size) { \
case 1: \
val = *(const unsigned char *) ptr; \
__ia64_val = *(const unsigned char *) __ia64_ptr; \
break; \
case 2: \
val = __uldw((const unsigned short *)ptr); \
__ia64_val = __uld2((const unsigned short *)__ia64_ptr); \
break; \
case 4: \
val = __uldl((const unsigned int *)ptr); \
__ia64_val = __uld4((const unsigned int *)__ia64_ptr); \
break; \
case 8: \
val = __uldq((const unsigned long *)ptr); \
__ia64_val = __uld8((const unsigned long *)__ia64_ptr); \
break; \
default: \
val = ia64_bad_unaligned_access_length(); \
__ia64_val = ia64_bad_unaligned_access_length(); \
} \
val; \
__ia64_val; \
})
#define ia64_put_unaligned(_val,_ptr,size) \
do { \
const void *ptr = (_ptr); \
unsigned long val = (_val); \
const void *__ia64_ptr = (_ptr); \
unsigned long __ia64_val = (_val); \
\
switch (size) { \
case 1: \
*(unsigned char *)ptr = (val); \
*(unsigned char *)__ia64_ptr = (__ia64_val); \
break; \
case 2: \
__ustw(val, (unsigned short *)ptr); \
__ust2(__ia64_val, (unsigned short *)__ia64_ptr); \
break; \
case 4: \
__ustl(val, (unsigned int *)ptr); \
__ust4(__ia64_val, (unsigned int *)__ia64_ptr); \
break; \
case 8: \
__ustq(val, (unsigned long *)ptr); \
__ust8(__ia64_val, (unsigned long *)__ia64_ptr); \
break; \
default: \
ia64_bad_unaligned_access_length(); \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment