Commit b3203cbc authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents e4fa49bd 3f4533c3
......@@ -414,12 +414,18 @@ CONFIG_IA64_GENERIC
HP-simulator For the HP simulator
(<http://software.hp.com/ia64linux/>).
HP-zx1 For HP zx1 platforms.
SN1-simulator For the SGI SN1 simulator.
DIG-compliant For DIG ("Developer's Interface Guide") compliant
system.
systems.
If you don't know what to do, choose "generic".
CONFIG_IA64_HP_ZX1
Build a kernel that runs on HP zx1-based systems. This adds support
for the zx1 IOMMU and makes root bus bridges appear in PCI config space
(required for zx1 agpgart support).
CONFIG_IA64_PAGE_SIZE_4KB
This lets you select the page size of the kernel. For best IA-64
performance, a page size of 8KB or 16KB is recommended. For best
......
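The page-size options above ultimately just pick PAGE_SHIFT for the build. A minimal sketch of the usual IA-64 mapping, assuming the include/asm-ia64/page.h layout of this era (that header is not part of the hunks shown here):

/* Illustrative only: how CONFIG_IA64_PAGE_SIZE_* typically selects PAGE_SHIFT. */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error Unsupported page size!
#endif
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4, 8, 16, or 64 KB */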
......@@ -22,7 +22,7 @@ CFLAGS := $(CFLAGS) -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
# -ffunction-sections
CFLAGS_KERNEL := -mconstant-gp
GCC_VERSION=$(shell $(CROSS_COMPILE)$(HOSTCC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
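# GCC_VERSION keeps only the major version, e.g. "gcc version 3.0.4 ..." -> 3; gcc 3.x gets the extra flags below.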
ifneq ($(GCC_VERSION),2)
CFLAGS += -frename-registers --param max-inline-insns=2000
......@@ -33,16 +33,11 @@ ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
endif
ifdef CONFIG_IA64_GENERIC
CORE_FILES := arch/$(ARCH)/hp/hp.a \
arch/$(ARCH)/sn/sn.o \
arch/$(ARCH)/dig/dig.a \
arch/$(ARCH)/sn/io/sgiio.o \
CORE_FILES := arch/$(ARCH)/hp/hp.o \
arch/$(ARCH)/dig/dig.a \
$(CORE_FILES)
SUBDIRS := arch/$(ARCH)/hp \
arch/$(ARCH)/sn/sn1 \
arch/$(ARCH)/sn \
arch/$(ARCH)/dig \
arch/$(ARCH)/sn/io \
$(SUBDIRS)
else # !GENERIC
......@@ -50,7 +45,16 @@ else # !GENERIC
ifdef CONFIG_IA64_HP_SIM
SUBDIRS := arch/$(ARCH)/hp \
$(SUBDIRS)
CORE_FILES := arch/$(ARCH)/hp/hp.a \
CORE_FILES := arch/$(ARCH)/hp/hp.o \
$(CORE_FILES)
endif
ifdef CONFIG_IA64_HP_ZX1
SUBDIRS := arch/$(ARCH)/hp \
arch/$(ARCH)/dig \
$(SUBDIRS)
CORE_FILES := arch/$(ARCH)/hp/hp.o \
arch/$(ARCH)/dig/dig.a \
$(CORE_FILES)
endif
......
......@@ -22,6 +22,7 @@ choice 'IA-64 system type' \
"generic CONFIG_IA64_GENERIC \
DIG-compliant CONFIG_IA64_DIG \
HP-simulator CONFIG_IA64_HP_SIM \
HP-zx1 CONFIG_IA64_HP_ZX1 \
SGI-SN1 CONFIG_IA64_SGI_SN1 \
SGI-SN2 CONFIG_IA64_SGI_SN2" generic
......@@ -56,7 +57,7 @@ if [ "$CONFIG_MCKINLEY" = "y" ]; then
fi
fi
if [ "$CONFIG_IA64_DIG" = "y" ]; then
if [ "$CONFIG_IA64_GENERIC" = "y" ] || [ "$CONFIG_IA64_DIG" = "y" ] || [ "$CONFIG_IA64_HP_ZX1" = "y" ]; then
bool ' Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
define_bool CONFIG_PM y
fi
......
......@@ -23,7 +23,7 @@ CONFIG_MODVERSIONS=y
# CONFIG_KMOD is not set
#
# General setup
# Processor type and features
#
CONFIG_IA64=y
# CONFIG_ISA is not set
......@@ -32,21 +32,22 @@ CONFIG_IA64=y
# CONFIG_SBUS is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_ACPI=y
CONFIG_ACPI_EFI=y
CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_KERNEL_CONFIG=y
CONFIG_ITANIUM=y
# CONFIG_MCKINLEY is not set
# CONFIG_IA64_GENERIC is not set
CONFIG_IA64_DIG=y
# CONFIG_IA64_HP_SIM is not set
# CONFIG_IA64_HP_ZX1 is not set
# CONFIG_IA64_SGI_SN1 is not set
# CONFIG_IA64_SGI_SN2 is not set
# CONFIG_IA64_PAGE_SIZE_4KB is not set
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_IA64_PAGE_SIZE_16KB=y
# CONFIG_IA64_PAGE_SIZE_64KB is not set
CONFIG_ACPI=y
CONFIG_ACPI_EFI=y
CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_KERNEL_CONFIG=y
CONFIG_IA64_BRL_EMU=y
# CONFIG_ITANIUM_BSTEP_SPECIFIC is not set
CONFIG_IA64_L1_CACHE_SHIFT=6
......@@ -60,15 +61,23 @@ CONFIG_IA64_PALINFO=y
CONFIG_EFI_VARS=y
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_ACPI_DEBUG is not set
CONFIG_ACPI_BUSMGR=y
CONFIG_ACPI_SYS=y
CONFIG_ACPI_CPU=y
#
# ACPI Support
#
CONFIG_ACPI=y
CONFIG_ACPI_EFI=y
CONFIG_ACPI_BOOT=y
CONFIG_ACPI_BUS=y
CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_PCI=y
CONFIG_ACPI_POWER=y
CONFIG_ACPI_SYSTEM=y
CONFIG_ACPI_BUTTON=y
CONFIG_ACPI_AC=y
CONFIG_ACPI_EC=y
CONFIG_ACPI_CMBATT=y
CONFIG_ACPI_THERMAL=y
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_THERMAL=m
# CONFIG_ACPI_DEBUG is not set
CONFIG_PCI=y
CONFIG_PCI_NAMES=y
# CONFIG_HOTPLUG is not set
......@@ -231,15 +240,13 @@ CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_BLK_DEV_ALI15X3 is not set
# CONFIG_WDC_ALI15X3 is not set
# CONFIG_BLK_DEV_AMD74XX is not set
# CONFIG_AMD74XX_OVERRIDE is not set
# CONFIG_BLK_DEV_CMD64X is not set
# CONFIG_BLK_DEV_CY82C693 is not set
# CONFIG_BLK_DEV_CS5530 is not set
# CONFIG_BLK_DEV_HPT34X is not set
# CONFIG_HPT34X_AUTODMA is not set
# CONFIG_BLK_DEV_HPT366 is not set
CONFIG_BLK_DEV_PIIX=y
# CONFIG_PIIX_TUNING is not set
# CONFIG_BLK_DEV_PIIX is not set
# CONFIG_BLK_DEV_NS87415 is not set
# CONFIG_BLK_DEV_OPTI621 is not set
# CONFIG_BLK_DEV_PDC_ADMA is not set
......@@ -254,7 +261,6 @@ CONFIG_BLK_DEV_PIIX=y
# CONFIG_IDEDMA_IVB is not set
# CONFIG_IDEDMA_AUTO is not set
# CONFIG_DMA_NONPCI is not set
CONFIG_BLK_DEV_IDE_MODES=y
# CONFIG_BLK_DEV_ATARAID is not set
# CONFIG_BLK_DEV_ATARAID_PDC is not set
# CONFIG_BLK_DEV_ATARAID_HPT is not set
......@@ -691,8 +697,6 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_MINIX_SUBPARTITION is not set
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
CONFIG_EFI_PARTITION=y
# CONFIG_DEVFS_GUID is not set
# CONFIG_LDM_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
......@@ -726,6 +730,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
......@@ -814,8 +819,9 @@ CONFIG_USB_UHCI=m
# USB Device Class drivers
#
# CONFIG_USB_AUDIO is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_BLUETOOTH is not set
# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
# CONFIG_USB_STORAGE is not set
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
......@@ -825,13 +831,12 @@ CONFIG_USB_UHCI=m
# CONFIG_USB_STORAGE_HP8200e is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
#
# USB Human Interface Devices (HID)
#
CONFIG_USB_HID=m
CONFIG_USB_HIDINPUT=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_KBD=m
CONFIG_USB_MOUSE=m
......@@ -849,23 +854,24 @@ CONFIG_USB_MOUSE=m
#
# USB Multimedia devices
#
# CONFIG_USB_DABUSB is not set
# CONFIG_USB_VICAM is not set
# CONFIG_USB_DSBR is not set
# CONFIG_USB_IBMCAM is not set
# CONFIG_USB_KONICAWC is not set
# CONFIG_USB_OV511 is not set
# CONFIG_USB_PWC is not set
# CONFIG_USB_SE401 is not set
# CONFIG_USB_STV680 is not set
# CONFIG_USB_VICAM is not set
# CONFIG_USB_DSBR is not set
# CONFIG_USB_DABUSB is not set
# CONFIG_USB_KONICAWC is not set
#
# USB Network adaptors
#
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_CATC is not set
# CONFIG_USB_CDCETHER is not set
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
#
......@@ -897,9 +903,11 @@ CONFIG_USB_MOUSE=m
# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
# CONFIG_USB_SERIAL_MCT_U232 is not set
# CONFIG_USB_SERIAL_KLSI is not set
# CONFIG_USB_SERIAL_MCT_U232 is not set
# CONFIG_USB_SERIAL_PL2303 is not set
# CONFIG_USB_SERIAL_SAFE is not set
# CONFIG_USB_SERIAL_SAFE_PADDED is not set
# CONFIG_USB_SERIAL_CYBERJACK is not set
# CONFIG_USB_SERIAL_XIRCOM is not set
# CONFIG_USB_SERIAL_OMNINET is not set
......@@ -907,8 +915,10 @@ CONFIG_USB_MOUSE=m
#
# USB Miscellaneous drivers
#
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_TIGL is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_RIO500 is not set
#
# Library routines
......@@ -936,11 +946,3 @@ CONFIG_IA64_EARLY_PRINTK=y
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_IA64_DEBUG_CMPXCHG is not set
# CONFIG_IA64_DEBUG_IRQ is not set
CONFIG_KDB=y
# CONFIG_KDB_MODULES is not set
# CONFIG_KDB_OFF is not set
#
# Load all symbols for debugging is required for KDB
#
CONFIG_KALLSYMS=y
......@@ -33,9 +33,6 @@
* is sufficient (the IDE driver will autodetect the drive geometry).
*/
char drive_info[4*16];
extern int pcat_compat;
unsigned char aux_device_present = 0xaa; /* XXX remove this when legacy I/O is gone */
void __init
dig_setup (char **cmdline_p)
......@@ -85,16 +82,4 @@ dig_setup (char **cmdline_p)
void __init
dig_irq_init (void)
{
if (pcat_compat) {
/*
* Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
* enabled.
*/
printk("%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__);
outb(0xff, 0xA1);
outb(0xff, 0x21);
} else {
printk("%s: System doesn't have PC-AT compatible dual-8259 setup. "
"Nothing to be done\n", __FUNCTION__);
}
}
mainmenu_option next_comment
comment 'HP Simulator drivers'
bool 'Simulated Ethernet ' CONFIG_HP_SIMETH
bool 'Simulated serial driver support' CONFIG_HP_SIMSERIAL
if [ "$CONFIG_SCSI" != "n" ]; then
bool 'Simulated SCSI disk' CONFIG_HP_SIMSCSI
fi
endmenu
#
# ia64/platform/hp/Makefile
#
# Copyright (C) 2002 Hewlett-Packard Co.
# David Mosberger-Tang <davidm@hpl.hp.com>
# Copyright (C) 1999 Silicon Graphics, Inc.
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
# arch/ia64/hp/Makefile
# Copyright (c) 2002 Matthew Wilcox for Hewlett Packard
all: hp.a
ALL_SUB_DIRS := sim zx1 common
O_TARGET := hp.a
O_TARGET := hp.o
obj-y := hpsim_console.o hpsim_irq.o hpsim_setup.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
subdir-$(CONFIG_IA64_GENERIC) += $(ALL_SUB_DIRS)
subdir-$(CONFIG_IA64_HP_SIM) += sim
subdir-$(CONFIG_IA64_HP_ZX1) += zx1 common
obj-$(CONFIG_SIMETH) += simeth.o
obj-$(CONFIG_SIM_SERIAL) += simserial.o
obj-$(CONFIG_SCSI_SIM) += simscsi.o
clean::
SUB_DIRS := $(subdir-y)
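# Link each enabled subdirectory's built-in object: e.g. subdir-y = sim zx1 expands to obj-y += sim/sim.o zx1/zx1.o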
obj-y += $(join $(subdir-y),$(subdir-y:%=/%.o))
include $(TOPDIR)/Rules.make
#
# ia64/platform/hp/common/Makefile
#
# Copyright (C) 2002 Hewlett Packard
# Copyright (C) Alex Williamson (alex_williamson@hp.com)
#
O_TARGET := common.o
export-objs := sba_iommu.o
obj-y := sba_iommu.o
include $(TOPDIR)/Rules.make
This source diff could not be displayed because it is too large.
#
# ia64/platform/hp/sim/Makefile
#
# Copyright (C) 2002 Hewlett-Packard Co.
# David Mosberger-Tang <davidm@hpl.hp.com>
# Copyright (C) 1999 Silicon Graphics, Inc.
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
O_TARGET := sim.o
obj-y := hpsim_console.o hpsim_irq.o hpsim_setup.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
obj-$(CONFIG_HP_SIMETH) += simeth.o
obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
include $(TOPDIR)/Rules.make
#
# ia64/platform/hp/zx1/Makefile
#
# Copyright (C) 2002 Hewlett Packard
# Copyright (C) Alex Williamson (alex_williamson@hp.com)
#
O_TARGET := zx1.o
obj-y := hpzx1_misc.o
obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o
include $(TOPDIR)/Rules.make
#define MACHVEC_PLATFORM_NAME hpzx1
#include <asm/machvec_init.h>
/*
* Misc. support for HP zx1 chipset support
*
* Copyright (C) 2002 Hewlett-Packard Co
* Copyright (C) 2002 Alex Williamson <alex_williamson@hp.com>
* Copyright (C) 2002 Bjorn Helgaas <bjorn_helgaas@hp.com>
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <asm/iosapic.h>
#include <asm/efi.h>
#include "../drivers/acpi/include/platform/acgcc.h"
#include "../drivers/acpi/include/actypes.h"
#include "../drivers/acpi/include/acexcep.h"
#include "../drivers/acpi/include/acpixf.h"
#include "../drivers/acpi/include/actbl.h"
#include "../drivers/acpi/include/acconfig.h"
#include "../drivers/acpi/include/acmacros.h"
#include "../drivers/acpi/include/aclocal.h"
#include "../drivers/acpi/include/acobject.h"
#include "../drivers/acpi/include/acstruct.h"
#include "../drivers/acpi/include/acnamesp.h"
#include "../drivers/acpi/include/acutils.h"
#define PFX "hpzx1: "
struct fake_pci_dev {
struct fake_pci_dev *next;
unsigned char bus;
unsigned int devfn;
int sizing; // in middle of BAR sizing operation?
unsigned long csr_base;
unsigned int csr_size;
unsigned long mapped_csrs; // ioremapped
};
static struct fake_pci_dev *fake_pci_head, **fake_pci_tail = &fake_pci_head;
static struct pci_ops orig_pci_ops;
static inline struct fake_pci_dev *
fake_pci_find_slot(unsigned char bus, unsigned int devfn)
{
struct fake_pci_dev *dev;
for (dev = fake_pci_head; dev; dev = dev->next)
if (dev->bus == bus && dev->devfn == devfn)
return dev;
return NULL;
}
static struct fake_pci_dev *
alloc_fake_pci_dev(void)
{
struct fake_pci_dev *dev;
dev = kmalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
memset(dev, 0, sizeof(*dev));
*fake_pci_tail = dev;
fake_pci_tail = &dev->next;
return dev;
}
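/*
 * HP_CFG_RD/HP_CFG_WR generate the pci_ops handlers below: config accesses to
 * a fake device are redirected into its memory-mapped CSR space (with BAR 0
 * synthesized from csr_base/csr_size), while real devices fall through to the
 * original pci_ops.
 */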
#define HP_CFG_RD(sz, bits, name) \
static int hp_cfg_read##sz (struct pci_dev *dev, int where, u##bits *value) \
{ \
struct fake_pci_dev *fake_dev; \
if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \
return orig_pci_ops.name(dev, where, value); \
\
switch (where) { \
case PCI_COMMAND: \
*value = read##sz(fake_dev->mapped_csrs + where); \
*value |= PCI_COMMAND_MEMORY; /* SBA omits this */ \
break; \
case PCI_BASE_ADDRESS_0: \
if (fake_dev->sizing) \
*value = ~(fake_dev->csr_size - 1); \
else \
*value = (fake_dev->csr_base & \
PCI_BASE_ADDRESS_MEM_MASK) | \
PCI_BASE_ADDRESS_SPACE_MEMORY; \
fake_dev->sizing = 0; \
break; \
default: \
*value = read##sz(fake_dev->mapped_csrs + where); \
break; \
} \
return PCIBIOS_SUCCESSFUL; \
}
#define HP_CFG_WR(sz, bits, name) \
static int hp_cfg_write##sz (struct pci_dev *dev, int where, u##bits value) \
{ \
struct fake_pci_dev *fake_dev; \
if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \
return orig_pci_ops.name(dev, where, value); \
\
switch (where) { \
case PCI_BASE_ADDRESS_0: \
if (value == ~0) \
fake_dev->sizing = 1; \
break; \
default: \
write##sz(value, fake_dev->mapped_csrs + where); \
break; \
} \
return PCIBIOS_SUCCESSFUL; \
}
HP_CFG_RD(b, 8, read_byte)
HP_CFG_RD(w, 16, read_word)
HP_CFG_RD(l, 32, read_dword)
HP_CFG_WR(b, 8, write_byte)
HP_CFG_WR(w, 16, write_word)
HP_CFG_WR(l, 32, write_dword)
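/* Positional initializer: struct pci_ops here is read_byte/word/dword followed by write_byte/word/dword. */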
static struct pci_ops hp_pci_conf = {
hp_cfg_readb,
hp_cfg_readw,
hp_cfg_readl,
hp_cfg_writeb,
hp_cfg_writew,
hp_cfg_writel,
};
/*
* Assume we'll never have a physical slot higher than 0x10, so we can
* use slots above that for "fake" PCI devices to represent things
* that only show up in the ACPI namespace.
*/
#define HP_MAX_SLOT 0x10
static struct fake_pci_dev *
hpzx1_fake_pci_dev(unsigned long addr, unsigned int bus, unsigned int size)
{
struct fake_pci_dev *dev;
int slot;
// Note: lspci thinks 0x1f is invalid
for (slot = 0x1e; slot > HP_MAX_SLOT; slot--) {
if (!fake_pci_find_slot(bus, PCI_DEVFN(slot, 0)))
break;
}
if (slot == HP_MAX_SLOT) {
printk(KERN_ERR PFX
"no slot space for device (0x%p) on bus 0x%02x\n",
(void *) addr, bus);
return NULL;
}
dev = alloc_fake_pci_dev();
if (!dev) {
printk(KERN_ERR PFX
"no memory for device (0x%p) on bus 0x%02x\n",
(void *) addr, bus);
return NULL;
}
dev->bus = bus;
dev->devfn = PCI_DEVFN(slot, 0);
dev->csr_base = addr;
dev->csr_size = size;
/*
* Drivers should ioremap what they need, but we have to do
* it here, too, so PCI config accesses work.
*/
dev->mapped_csrs = ioremap(dev->csr_base, dev->csr_size);
return dev;
}
typedef struct {
u8 guid_id;
u8 guid[16];
u8 csr_base[8];
u8 csr_length[8];
} acpi_hp_vendor_long;
#define HP_CCSR_LENGTH 0x21
#define HP_CCSR_TYPE 0x2
#define HP_CCSR_GUID EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \
0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
extern acpi_status acpi_get_crs(acpi_handle, acpi_buffer *);
extern acpi_resource *acpi_get_crs_next(acpi_buffer *, int *);
extern acpi_resource_data *acpi_get_crs_type(acpi_buffer *, int *, int);
extern void acpi_dispose_crs(acpi_buffer *);
extern acpi_status acpi_cf_evaluate_method(acpi_handle, UINT8 *, NATIVE_UINT *);
static acpi_status
hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
int i, offset = 0;
acpi_status status;
acpi_buffer buf;
acpi_resource_vendor *res;
acpi_hp_vendor_long *hp_res;
efi_guid_t vendor_guid;
*csr_base = 0;
*csr_length = 0;
status = acpi_get_crs(obj, &buf);
if (status != AE_OK) {
printk(KERN_ERR PFX "Unable to get _CRS data on object\n");
return status;
}
res = (acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
if (!res) {
printk(KERN_ERR PFX "Failed to find config space for device\n");
acpi_dispose_crs(&buf);
return AE_NOT_FOUND;
}
hp_res = (acpi_hp_vendor_long *)(res->reserved);
if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
printk(KERN_ERR PFX "Unknown Vendor data\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
printk(KERN_ERR PFX "Vendor GUID does not match\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
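/* _CRS encodes the CSR base and length as 8 little-endian bytes each. */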
for (i = 0 ; i < 8 ; i++) {
*csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
*csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
}
acpi_dispose_crs(&buf);
return AE_OK;
}
static acpi_status
hpzx1_sba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
{
u64 csr_base = 0, csr_length = 0;
char *name = context;
struct fake_pci_dev *dev;
acpi_status status;
status = hp_csr_space(obj, &csr_base, &csr_length);
if (status != AE_OK)
return status;
/*
* Only SBA shows up in ACPI namespace, so its CSR space
* includes both SBA and IOC. Make SBA and IOC show up
* separately in PCI space.
*/
if ((dev = hpzx1_fake_pci_dev(csr_base, 0, 0x1000)))
printk(KERN_INFO PFX "%s SBA at 0x%lx; pci dev %02x:%02x.%d\n",
name, csr_base, dev->bus,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
if ((dev = hpzx1_fake_pci_dev(csr_base + 0x1000, 0, 0x1000)))
printk(KERN_INFO PFX "%s IOC at 0x%lx; pci dev %02x:%02x.%d\n",
name, csr_base + 0x1000, dev->bus,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
return AE_OK;
}
static acpi_status
hpzx1_lba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
{
acpi_status status;
u64 csr_base = 0, csr_length = 0;
char *name = context;
NATIVE_UINT busnum = 0;
struct fake_pci_dev *dev;
status = hp_csr_space(obj, &csr_base, &csr_length);
if (status != AE_OK)
return status;
status = acpi_cf_evaluate_method(obj, METHOD_NAME__BBN, &busnum);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PFX "evaluate _BBN fail=0x%x\n", status);
busnum = 0; // no _BBN; stick it on bus 0
}
if ((dev = hpzx1_fake_pci_dev(csr_base, busnum, csr_length)))
printk(KERN_INFO PFX "%s LBA at 0x%lx, _BBN 0x%02x; "
"pci dev %02x:%02x.%d\n",
name, csr_base, busnum, dev->bus,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
return AE_OK;
}
static void
hpzx1_acpi_dev_init(void)
{
extern struct pci_ops pci_conf;
/*
* Make fake PCI devices for the following hardware in the
* ACPI namespace. This makes it more convenient for drivers
* because they can claim these devices based on PCI
* information, rather than needing to know about ACPI. The
* 64-bit "HPA" space for this hardware is available as BAR
* 0/1.
*
* HWP0001: Single IOC SBA w/o IOC in namespace
* HWP0002: LBA device
* HWP0003: AGP LBA device
*/
acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL);
#ifdef CONFIG_IA64_HP_PROTO
if (fake_pci_tail != &fake_pci_head) {
#endif
acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002", NULL);
acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003", NULL);
#ifdef CONFIG_IA64_HP_PROTO
}
#define ZX1_FUNC_ID_VALUE (PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP
/*
* Early protos don't have bridges in the ACPI namespace, so
* if we didn't find anything, add the things we know are
* there.
*/
if (fake_pci_tail == &fake_pci_head) {
u64 hpa, csr_base;
struct fake_pci_dev *dev;
csr_base = 0xfed00000UL;
hpa = (u64) ioremap(csr_base, 0x1000);
if (__raw_readl(hpa) == ZX1_FUNC_ID_VALUE) {
if ((dev = hpzx1_fake_pci_dev(csr_base, 0, 0x1000)))
printk(KERN_INFO PFX "HWP0001 SBA at 0x%lx; "
"pci dev %02x:%02x.%d\n", csr_base,
dev->bus, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn));
if ((dev = hpzx1_fake_pci_dev(csr_base + 0x1000, 0,
0x1000)))
printk(KERN_INFO PFX "HWP0001 IOC at 0x%lx; "
"pci dev %02x:%02x.%d\n",
csr_base + 0x1000,
dev->bus, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn));
csr_base = 0xfed24000UL;
iounmap(hpa);
hpa = (u64) ioremap(csr_base, 0x1000);
if ((dev = hpzx1_fake_pci_dev(csr_base, 0x40, 0x1000)))
printk(KERN_INFO PFX "HWP0003 AGP LBA at "
"0x%lx; pci dev %02x:%02x.%d\n",
csr_base,
dev->bus, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn));
}
iounmap(hpa);
}
#endif
if (fake_pci_tail == &fake_pci_head)
return;
/*
* Replace PCI ops, but only if we made fake devices.
*/
orig_pci_ops = pci_conf;
pci_conf = hp_pci_conf;
}
extern void sba_init(void);
void
hpzx1_pci_fixup (int phase)
{
if (phase == 0)
hpzx1_acpi_dev_init();
iosapic_pci_fixup(phase);
if (phase == 1)
sba_init();
}
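The fake PCI devices set up above let drivers claim zx1 hardware (SBA, IOC, LBAs) through ordinary PCI interfaces instead of walking the ACPI namespace themselves. A minimal sketch of what that enables, assuming the 2.5-era PCI API (pci_find_device, pci_read_config_dword) and the HP IDs already referenced in this file; this is illustrative, not the actual sba_init() probe:

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical probe: locate the fake SBA device and read back its CSR base. */
static int __init zx1_sba_probe_sketch(void)
{
	struct pci_dev *sba;
	u32 bar0;

	/* hp_cfg_readl() returns the SBA function ID at config offset 0, so a
	 * plain vendor/device lookup finds the fake device. */
	sba = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_SBA, NULL);
	if (!sba)
		return -ENODEV;

	/* BAR 0 is synthesized from fake_pci_dev->csr_base by hp_cfg_readl(). */
	pci_read_config_dword(sba, PCI_BASE_ADDRESS_0, &bar0);
	printk(KERN_INFO "zx1 SBA CSRs at 0x%lx\n",
	       (unsigned long) (bar0 & PCI_BASE_ADDRESS_MEM_MASK));
	return 0;
}

This is also why hpzx1_pci_fixup() can simply call sba_init() in phase 1: by then the fake devices exist and the IOMMU code can find the SBA like any other PCI device.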
/*
* Misc. support for HP zx1 chipset support
*
* Copyright (C) 2002 Hewlett-Packard Co
* Copyright (C) 2002 Alex Williamson <alex_williamson@hp.com>
* Copyright (C) 2002 Bjorn Helgaas <bjorn_helgaas@hp.com>
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <asm/iosapic.h>
#include <asm/efi.h>
#include "../drivers/acpi/include/platform/acgcc.h"
#include "../drivers/acpi/include/actypes.h"
#include "../drivers/acpi/include/acexcep.h"
#include "../drivers/acpi/include/acpixf.h"
#include "../drivers/acpi/include/actbl.h"
#include "../drivers/acpi/include/acconfig.h"
#include "../drivers/acpi/include/acmacros.h"
#include "../drivers/acpi/include/aclocal.h"
#include "../drivers/acpi/include/acobject.h"
#include "../drivers/acpi/include/acstruct.h"
#include "../drivers/acpi/include/acnamesp.h"
#include "../drivers/acpi/include/acutils.h"
#define PFX "hpzx1: "
struct fake_pci_dev {
struct fake_pci_dev *next;
unsigned char bus;
unsigned int devfn;
int sizing; // in middle of BAR sizing operation?
unsigned long csr_base;
unsigned int csr_size;
unsigned long mapped_csrs; // ioremapped
};
static struct fake_pci_dev *fake_pci_head, **fake_pci_tail = &fake_pci_head;
static struct pci_ops orig_pci_ops;
static inline struct fake_pci_dev *
fake_pci_find_slot(unsigned char bus, unsigned int devfn)
{
struct fake_pci_dev *dev;
for (dev = fake_pci_head; dev; dev = dev->next)
if (dev->bus == bus && dev->devfn == devfn)
return dev;
return NULL;
}
static struct fake_pci_dev *
alloc_fake_pci_dev(void)
{
struct fake_pci_dev *dev;
dev = kmalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
memset(dev, 0, sizeof(*dev));
*fake_pci_tail = dev;
fake_pci_tail = &dev->next;
return dev;
}
#define HP_CFG_RD(sz, bits, name) \
static int hp_cfg_read##sz (struct pci_dev *dev, int where, u##bits *value) \
{ \
struct fake_pci_dev *fake_dev; \
if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \
return orig_pci_ops.name(dev, where, value); \
\
switch (where) { \
case PCI_COMMAND: \
*value = read##sz(fake_dev->mapped_csrs + where); \
*value |= PCI_COMMAND_MEMORY; /* SBA omits this */ \
break; \
case PCI_BASE_ADDRESS_0: \
if (fake_dev->sizing) \
*value = ~(fake_dev->csr_size - 1); \
else \
*value = (fake_dev->csr_base & \
PCI_BASE_ADDRESS_MEM_MASK) | \
PCI_BASE_ADDRESS_SPACE_MEMORY; \
fake_dev->sizing = 0; \
break; \
default: \
*value = read##sz(fake_dev->mapped_csrs + where); \
break; \
} \
return PCIBIOS_SUCCESSFUL; \
}
#define HP_CFG_WR(sz, bits, name) \
static int hp_cfg_write##sz (struct pci_dev *dev, int where, u##bits value) \
{ \
struct fake_pci_dev *fake_dev; \
if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \
return orig_pci_ops.name(dev, where, value); \
\
switch (where) { \
case PCI_BASE_ADDRESS_0: \
if (value == ~0) \
fake_dev->sizing = 1; \
break; \
default: \
write##sz(value, fake_dev->mapped_csrs + where); \
break; \
} \
return PCIBIOS_SUCCESSFUL; \
}
HP_CFG_RD(b, 8, read_byte)
HP_CFG_RD(w, 16, read_word)
HP_CFG_RD(l, 32, read_dword)
HP_CFG_WR(b, 8, write_byte)
HP_CFG_WR(w, 16, write_word)
HP_CFG_WR(l, 32, write_dword)
static struct pci_ops hp_pci_conf = {
hp_cfg_readb,
hp_cfg_readw,
hp_cfg_readl,
hp_cfg_writeb,
hp_cfg_writew,
hp_cfg_writel,
};
/*
* Assume we'll never have a physical slot higher than 0x10, so we can
* use slots above that for "fake" PCI devices to represent things
* that only show up in the ACPI namespace.
*/
#define HP_MAX_SLOT 0x10
static struct fake_pci_dev *
hpzx1_fake_pci_dev(unsigned long addr, unsigned int bus, unsigned int size)
{
struct fake_pci_dev *dev;
int slot;
// Note: lspci thinks 0x1f is invalid
for (slot = 0x1e; slot > HP_MAX_SLOT; slot--) {
if (!fake_pci_find_slot(bus, PCI_DEVFN(slot, 0)))
break;
}
if (slot == HP_MAX_SLOT) {
printk(KERN_ERR PFX
"no slot space for device (0x%p) on bus 0x%02x\n",
(void *) addr, bus);
return NULL;
}
dev = alloc_fake_pci_dev();
if (!dev) {
printk(KERN_ERR PFX
"no memory for device (0x%p) on bus 0x%02x\n",
(void *) addr, bus);
return NULL;
}
dev->bus = bus;
dev->devfn = PCI_DEVFN(slot, 0);
dev->csr_base = addr;
dev->csr_size = size;
/*
* Drivers should ioremap what they need, but we have to do
* it here, too, so PCI config accesses work.
*/
dev->mapped_csrs = (unsigned long) ioremap(dev->csr_base, dev->csr_size);
return dev;
}
typedef struct {
u8 guid_id;
u8 guid[16];
u8 csr_base[8];
u8 csr_length[8];
} acpi_hp_vendor_long;
#define HP_CCSR_LENGTH 0x21
#define HP_CCSR_TYPE 0x2
#define HP_CCSR_GUID \
((efi_guid_t) { 0x69e9adf9, 0x924f, 0xab5f, { 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad }})
extern acpi_status acpi_get_crs(acpi_handle, acpi_buffer *);
extern acpi_resource *acpi_get_crs_next(acpi_buffer *, int *);
extern acpi_resource_data *acpi_get_crs_type(acpi_buffer *, int *, int);
extern void acpi_dispose_crs(acpi_buffer *);
extern acpi_status acpi_cf_evaluate_method(acpi_handle, UINT8 *, NATIVE_UINT *);
static acpi_status
hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
int i, offset = 0;
acpi_status status;
acpi_buffer buf;
acpi_resource_vendor *res;
acpi_hp_vendor_long *hp_res;
efi_guid_t vendor_guid;
*csr_base = 0;
*csr_length = 0;
status = acpi_get_crs(obj, &buf);
if (status != AE_OK) {
printk(KERN_ERR PFX "Unable to get _CRS data on object\n");
return status;
}
res = (acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
if (!res) {
printk(KERN_ERR PFX "Failed to find config space for device\n");
acpi_dispose_crs(&buf);
return AE_NOT_FOUND;
}
hp_res = (acpi_hp_vendor_long *)(res->reserved);
if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
printk(KERN_ERR PFX "Unknown Vendor data\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
printk(KERN_ERR PFX "Vendor GUID does not match\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
for (i = 0 ; i < 8 ; i++) {
*csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
*csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
}
acpi_dispose_crs(&buf);
return AE_OK;
}
static acpi_status
hpzx1_sba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
{
u64 csr_base = 0, csr_length = 0;
char *name = context;
struct fake_pci_dev *dev;
acpi_status status;
status = hp_csr_space(obj, &csr_base, &csr_length);
if (status != AE_OK)
return status;
/*
* Only SBA shows up in ACPI namespace, so its CSR space
* includes both SBA and IOC. Make SBA and IOC show up
* separately in PCI space.
*/
if ((dev = hpzx1_fake_pci_dev(csr_base, 0, 0x1000)))
printk(KERN_INFO PFX "%s SBA at 0x%lx; pci dev %02x:%02x.%d\n",
name, csr_base, dev->bus,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
if ((dev = hpzx1_fake_pci_dev(csr_base + 0x1000, 0, 0x1000)))
printk(KERN_INFO PFX "%s IOC at 0x%lx; pci dev %02x:%02x.%d\n",
name, csr_base + 0x1000, dev->bus,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
return AE_OK;
}
static acpi_status
hpzx1_lba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
{
acpi_status status;
u64 csr_base = 0, csr_length = 0;
char *name = context;
NATIVE_UINT busnum = 0;
struct fake_pci_dev *dev;
status = hp_csr_space(obj, &csr_base, &csr_length);
if (status != AE_OK)
return status;
status = acpi_cf_evaluate_method(obj, METHOD_NAME__BBN, &busnum);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PFX "evaluate _BBN fail=0x%x\n", status);
busnum = 0; // no _BBN; stick it on bus 0
}
if ((dev = hpzx1_fake_pci_dev(csr_base, busnum, csr_length)))
printk(KERN_INFO PFX "%s LBA at 0x%lx, _BBN 0x%02x; "
"pci dev %02x:%02x.%d\n",
name, csr_base, busnum, dev->bus,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
return AE_OK;
}
static void
hpzx1_acpi_dev_init(void)
{
extern struct pci_ops pci_conf;
/*
* Make fake PCI devices for the following hardware in the
* ACPI namespace. This makes it more convenient for drivers
* because they can claim these devices based on PCI
* information, rather than needing to know about ACPI. The
* 64-bit "HPA" space for this hardware is available as BAR
* 0/1.
*
* HWP0001: Single IOC SBA w/o IOC in namespace
* HWP0002: LBA device
* HWP0003: AGP LBA device
*/
acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL);
#ifdef CONFIG_IA64_HP_PROTO
if (fake_pci_tail != &fake_pci_head) {
#endif
acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002", NULL);
acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003", NULL);
#ifdef CONFIG_IA64_HP_PROTO
}
#define ZX1_FUNC_ID_VALUE (PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP
/*
* Early protos don't have bridges in the ACPI namespace, so
* if we didn't find anything, add the things we know are
* there.
*/
if (fake_pci_tail == &fake_pci_head) {
u64 hpa, csr_base;
struct fake_pci_dev *dev;
csr_base = 0xfed00000UL;
hpa = (u64) ioremap(csr_base, 0x1000);
if (__raw_readl(hpa) == ZX1_FUNC_ID_VALUE) {
if ((dev = hpzx1_fake_pci_dev(csr_base, 0, 0x1000)))
printk(KERN_INFO PFX "HWP0001 SBA at 0x%lx; "
"pci dev %02x:%02x.%d\n", csr_base,
dev->bus, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn));
if ((dev = hpzx1_fake_pci_dev(csr_base + 0x1000, 0,
0x1000)))
printk(KERN_INFO PFX "HWP0001 IOC at 0x%lx; "
"pci dev %02x:%02x.%d\n",
csr_base + 0x1000,
dev->bus, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn));
csr_base = 0xfed24000UL;
iounmap(hpa);
hpa = (u64) ioremap(csr_base, 0x1000);
if ((dev = hpzx1_fake_pci_dev(csr_base, 0x40, 0x1000)))
printk(KERN_INFO PFX "HWP0003 AGP LBA at "
"0x%lx; pci dev %02x:%02x.%d\n",
csr_base,
dev->bus, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn));
}
iounmap(hpa);
}
#endif
if (fake_pci_tail == &fake_pci_head)
return;
/*
* Replace PCI ops, but only if we made fake devices.
*/
orig_pci_ops = pci_conf;
pci_conf = hp_pci_conf;
}
extern void sba_init(void);
void
hpzx1_pci_fixup (int phase)
{
if (phase == 0)
hpzx1_acpi_dev_init();
iosapic_pci_fixup(phase);
if (phase == 1)
sba_init();
}
......@@ -47,6 +47,8 @@ static void elf32_set_personality (void);
#define ELF_PLAT_INIT(_r) ia64_elf32_init(_r)
#define setup_arg_pages(bprm) ia32_setup_arg_pages(bprm)
#define elf_map elf32_map
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
/* Ugly but avoids duplication */
......
......@@ -3003,7 +3003,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
if (request != PTRACE_KILL)
goto out;
}
if (child->p_pptr != current)
if (child->parent != current)
goto out;
switch (request) {
......
......@@ -17,6 +17,7 @@ obj-y := acpi.o entry.o gate.o efi.o efi_stub.o ia64_ksyms.o irq.o irq_ia64.o ir
machvec.o pal.o process.o perfmon.o ptrace.o sal.o semaphore.o setup.o \
signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
obj-$(CONFIG_IA64_GENERIC) += iosapic.o
obj-$(CONFIG_IA64_HP_ZX1) += iosapic.o
obj-$(CONFIG_IA64_DIG) += iosapic.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_EFI_VARS) += efivars.o
......
/*
* Advanced Configuration and Power Interface
* acpi.c - Architecture-Specific Low-Level ACPI Support
*
* Based on 'ACPI Specification 1.0b' February 2, 1999 and
* 'IA-64 Extensions to ACPI Specification' Revision 0.6
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2000 Intel Corp.
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999, 2000 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000, 2002 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2000 Intel Corp.
* Copyright (C) 2000, 2001 J.I. Lee <jung-ik.lee@intel.com>
* ACPI based kernel configuration manager.
* ACPI 2.0 & IA64 ext 0.71
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
......@@ -23,31 +36,16 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/irq.h>
#ifdef CONFIG_SERIAL_ACPI
#include <linux/acpi_serial.h>
#endif
#include <asm/acpi-ext.h>
#include <asm/acpikcfg.h>
#include <linux/acpi.h>
#include <asm/efi.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#undef ACPI_DEBUG /* Guess what this does? */
/* global array to record platform interrupt vectors for generic int routing */
int platform_irq_list[ACPI_MAX_PLATFORM_IRQS];
/* These are ugly but will be reclaimed by the kernel */
int __initdata available_cpus;
int __initdata total_cpus;
int __initdata pcat_compat;
void (*pm_idle) (void);
void (*pm_power_off) (void);
#define PREFIX "ACPI: "
asm (".weak iosapic_register_irq");
asm (".weak iosapic_register_legacy_irq");
......@@ -55,10 +53,16 @@ asm (".weak iosapic_register_platform_irq");
asm (".weak iosapic_init");
asm (".weak iosapic_version");
void (*pm_idle) (void);
void (*pm_power_off) (void);
/*
* TBD: Should go away once we have an ACPI parser.
*/
const char *
acpi_get_sysname (void)
{
/* the following should go away once we have an ACPI parser: */
#ifdef CONFIG_IA64_GENERIC
return "hpsim";
#else
......@@ -74,16 +78,21 @@ acpi_get_sysname (void)
# error Unknown platform. Fix acpi.c.
# endif
#endif
}
#ifdef CONFIG_ACPI_BOOT
#define ACPI_MAX_PLATFORM_IRQS 256
/* Array to record platform interrupt vectors for generic interrupt routing. */
int platform_irq_list[ACPI_MAX_PLATFORM_IRQS];
/*
* Interrupt routing API for device drivers.
* Provides the interrupt vector for a generic platform event
* (currently only CPEI implemented)
* Interrupt routing API for device drivers. Provides interrupt vector for
* a generic platform event. Currently only CPEI is implemented.
*/
int
acpi_request_vector(u32 int_type)
acpi_request_vector (u32 int_type)
{
int vector = -1;
......@@ -96,586 +105,494 @@ acpi_request_vector(u32 int_type)
return vector;
}
/*
* Configure legacy IRQ information.
*/
static void __init
acpi_legacy_irq (char *p)
{
acpi_entry_int_override_t *legacy = (acpi_entry_int_override_t *) p;
unsigned long polarity = 0, edge_triggered = 0;
/*
* If the platform we're running doesn't define
* iosapic_register_legacy_irq(), we ignore this info...
*/
if (!iosapic_register_legacy_irq)
return;
switch (legacy->flags) {
case 0x5: polarity = 1; edge_triggered = 1; break;
case 0x7: polarity = 0; edge_triggered = 1; break;
case 0xd: polarity = 1; edge_triggered = 0; break;
case 0xf: polarity = 0; edge_triggered = 0; break;
default:
printk(" ACPI Legacy IRQ 0x%02x: Unknown flags 0x%x\n", legacy->isa_irq,
legacy->flags);
break;
}
iosapic_register_legacy_irq(legacy->isa_irq, legacy->pin, polarity, edge_triggered);
}
/* --------------------------------------------------------------------------
Boot-time Table Parsing
-------------------------------------------------------------------------- */
static int total_cpus __initdata;
static int available_cpus __initdata;
struct acpi_table_madt * acpi_madt __initdata;
/*
* ACPI 2.0 tables parsing functions
*/
static unsigned long
readl_unaligned(void *p)
static int __init
acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
{
unsigned long ret;
struct acpi_table_lapic_addr_ovr *lapic = NULL;
lapic = (struct acpi_table_lapic_addr_ovr *) header;
if (!lapic)
return -EINVAL;
acpi_table_print_madt_entry(header);
if (lapic->address) {
iounmap((void *) ipi_base_addr);
ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
}
memcpy(&ret, p, sizeof(long));
return ret;
return 0;
}
/*
* Identify usable CPU's and remember them for SMP bringup later.
*/
static void __init
acpi20_lsapic (char *p)
static int __init
acpi_parse_lsapic (acpi_table_entry_header *header)
{
int add = 1;
struct acpi_table_lsapic *lsapic = NULL;
acpi20_entry_lsapic_t *lsapic = (acpi20_entry_lsapic_t *) p;
printk(" CPU %.04x:%.04x: ", lsapic->eid, lsapic->id);
lsapic = (struct acpi_table_lsapic *) header;
if (!lsapic)
return -EINVAL;
if ((lsapic->flags & LSAPIC_ENABLED) == 0) {
printk("disabled.\n");
add = 0;
}
acpi_table_print_madt_entry(header);
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[total_cpus] = -1;
#endif
if (add) {
printk("CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
if (lsapic->flags.enabled) {
available_cpus++;
printk("available");
printk(" enabled");
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid;
if (hard_smp_processor_id() == smp_boot_data.cpu_phys_id[total_cpus])
printk(" (BSP)");
#endif
printk(".\n");
}
else {
printk(" disabled");
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[total_cpus] = -1;
#endif
}
printk("\n");
total_cpus++;
return 0;
}
/*
* Extract iosapic info from madt (again) to determine which iosapic
* this platform interrupt resides in
*/
static int __init
acpi_parse_lapic_nmi (acpi_table_entry_header *header)
{
struct acpi_table_lapic_nmi *lacpi_nmi = NULL;
lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
if (!lacpi_nmi)
return -EINVAL;
acpi_table_print_madt_entry(header);
/* TBD: Support lapic_nmi entries */
return 0;
}
static int __init
acpi20_which_iosapic (int global_vector, acpi_madt_t *madt, u32 *irq_base, char **iosapic_address)
acpi_find_iosapic (int global_vector, u32 *irq_base, char **iosapic_address)
{
acpi_entry_iosapic_t *iosapic;
char *p, *end;
int ver, max_pin;
struct acpi_table_iosapic *iosapic = NULL;
int ver = 0;
int max_pin = 0;
char *p = 0;
char *end = 0;
if (!irq_base || !iosapic_address)
return -ENODEV;
p = (char *) (madt + 1);
end = p + (madt->header.length - sizeof(acpi_madt_t));
p = (char *) (acpi_madt + 1);
end = p + (acpi_madt->header.length - sizeof(struct acpi_table_madt));
while (p < end) {
switch (*p) {
case ACPI20_ENTRY_IO_SAPIC:
/* collect IOSAPIC info for platform int use later */
iosapic = (acpi_entry_iosapic_t *)p;
*irq_base = iosapic->irq_base;
if (*p == ACPI_MADT_IOSAPIC) {
iosapic = (struct acpi_table_iosapic *) p;
*irq_base = iosapic->global_irq_base;
*iosapic_address = ioremap(iosapic->address, 0);
/* is this the iosapic we're looking for? */
ver = iosapic_version(*iosapic_address);
max_pin = (ver >> 16) & 0xff;
if ((global_vector - *irq_base) <= max_pin)
return 0; /* found it! */
break;
default:
break;
return 0; /* Found it! */
}
p += p[1];
}
return 1;
return -ENODEV;
}
/*
* Info on platform interrupt sources: NMI, PMI, INIT, etc.
*/
static void __init
acpi20_platform (char *p, acpi_madt_t *madt)
static int __init
acpi_parse_iosapic (acpi_table_entry_header *header)
{
int vector;
u32 irq_base;
char *iosapic_address;
unsigned long polarity = 0, trigger = 0;
acpi20_entry_platform_src_t *plat = (acpi20_entry_platform_src_t *) p;
struct acpi_table_iosapic *iosapic;
printk("PLATFORM: IOSAPIC %x -> Vector %x on CPU %.04u:%.04u\n",
plat->iosapic_vector, plat->global_vector, plat->eid, plat->id);
iosapic = (struct acpi_table_iosapic *) header;
if (!iosapic)
return -EINVAL;
/* record platform interrupt vectors for generic int routing code */
acpi_table_print_madt_entry(header);
if (!iosapic_register_platform_irq) {
printk("acpi20_platform(): no ACPI platform IRQ support\n");
return;
if (iosapic_init) {
#ifndef CONFIG_ITANIUM
/* PCAT_COMPAT flag indicates dual-8259 setup */
iosapic_init(iosapic->address, iosapic->global_irq_base,
acpi_madt->flags.pcat_compat);
#else
/* Firmware on old Itanium systems is broken */
iosapic_init(iosapic->address, iosapic->global_irq_base, 1);
#endif
}
return 0;
}
static int __init
acpi_parse_plat_int_src (acpi_table_entry_header *header)
{
struct acpi_table_plat_int_src *plintsrc = NULL;
int vector = 0;
u32 irq_base = 0;
char *iosapic_address = NULL;
plintsrc = (struct acpi_table_plat_int_src *) header;
if (!plintsrc)
return -EINVAL;
/* extract polarity and trigger info from flags */
switch (plat->flags) {
case 0x5: polarity = 1; trigger = 1; break;
case 0x7: polarity = 0; trigger = 1; break;
case 0xd: polarity = 1; trigger = 0; break;
case 0xf: polarity = 0; trigger = 0; break;
default:
printk("acpi20_platform(): unknown flags 0x%x\n", plat->flags);
break;
acpi_table_print_madt_entry(header);
if (!iosapic_register_platform_irq) {
printk(KERN_WARNING PREFIX "No ACPI platform IRQ support\n");
return -ENODEV;
}
/* which iosapic does this IRQ belong to? */
if (acpi20_which_iosapic(plat->global_vector, madt, &irq_base, &iosapic_address)) {
printk("acpi20_platform(): I/O SAPIC not found!\n");
return;
if (0 != acpi_find_iosapic(plintsrc->global_irq, &irq_base, &iosapic_address)) {
printk(KERN_WARNING PREFIX "IOSAPIC not found\n");
return -ENODEV;
}
/*
* get vector assignment for this IRQ, set attributes, and program the IOSAPIC
* routing table
* Get vector assignment for this IRQ, set attributes, and program the
* IOSAPIC routing table.
*/
vector = iosapic_register_platform_irq(plat->int_type,
plat->global_vector,
plat->iosapic_vector,
plat->eid,
plat->id,
polarity,
trigger,
irq_base,
iosapic_address);
platform_irq_list[plat->int_type] = vector;
vector = iosapic_register_platform_irq (plintsrc->type,
plintsrc->global_irq,
plintsrc->iosapic_vector,
plintsrc->eid,
plintsrc->id,
(plintsrc->flags.polarity == 1) ? 1 : 0,
(plintsrc->flags.trigger == 1) ? 1 : 0,
irq_base,
iosapic_address);
platform_irq_list[plintsrc->type] = vector;
return 0;
}
/*
* Override the physical address of the local APIC in the MADT stable header.
*/
static void __init
acpi20_lapic_addr_override (char *p)
static int __init
acpi_parse_int_src_ovr (acpi_table_entry_header *header)
{
acpi20_entry_lapic_addr_override_t * lapic = (acpi20_entry_lapic_addr_override_t *) p;
struct acpi_table_int_src_ovr *p = NULL;
if (lapic->lapic_address) {
iounmap((void *)ipi_base_addr);
ipi_base_addr = (unsigned long) ioremap(lapic->lapic_address, 0);
p = (struct acpi_table_int_src_ovr *) header;
if (!p)
return -EINVAL;
printk("LOCAL ACPI override to 0x%lx(p=0x%lx)\n",
ipi_base_addr, lapic->lapic_address);
}
acpi_table_print_madt_entry(header);
/* Ignore if the platform doesn't support overrides */
if (!iosapic_register_legacy_irq)
return 0;
iosapic_register_legacy_irq(p->bus_irq, p->global_irq,
(p->flags.polarity == 1) ? 1 : 0,
(p->flags.trigger == 1) ? 1 : 0);
return 0;
}
/*
* Parse the ACPI Multiple APIC Description Table
*/
static void __init
acpi20_parse_madt (acpi_madt_t *madt)
static int __init
acpi_parse_nmi_src (acpi_table_entry_header *header)
{
acpi_entry_iosapic_t *iosapic = NULL;
acpi20_entry_lsapic_t *lsapic = NULL;
char *p, *end;
int i;
/* Base address of IPI Message Block */
if (madt->lapic_address) {
ipi_base_addr = (unsigned long) ioremap(madt->lapic_address, 0);
printk("Lapic address set to 0x%lx\n", ipi_base_addr);
} else
printk("Lapic address set to default 0x%lx\n", ipi_base_addr);
struct acpi_table_nmi_src *nmi_src = NULL;
/*
* The PCAT_COMPAT flag indicates that the system has a dual-8259 compatible
* setup.
*/
#ifdef CONFIG_ITANIUM
pcat_compat = 1; /* fw on some Itanium systems is broken... */
#else
pcat_compat = (madt->flags & MADT_PCAT_COMPAT);
#endif
nmi_src = (struct acpi_table_nmi_src*) header;
if (!nmi_src)
return -EINVAL;
p = (char *) (madt + 1);
end = p + (madt->header.length - sizeof(acpi_madt_t));
acpi_table_print_madt_entry(header);
/* Initialize platform interrupt vector array */
for (i = 0; i < ACPI_MAX_PLATFORM_IRQS; i++)
platform_irq_list[i] = -1;
/* TBD: Support nmi_src entries */
/*
* Split-up entry parsing to ensure ordering.
*/
while (p < end) {
switch (*p) {
case ACPI20_ENTRY_LOCAL_APIC_ADDR_OVERRIDE:
printk("ACPI 2.0 MADT: LOCAL APIC Override\n");
acpi20_lapic_addr_override(p);
break;
case ACPI20_ENTRY_LOCAL_SAPIC:
printk("ACPI 2.0 MADT: LOCAL SAPIC\n");
lsapic = (acpi20_entry_lsapic_t *) p;
acpi20_lsapic(p);
break;
case ACPI20_ENTRY_IO_SAPIC:
iosapic = (acpi_entry_iosapic_t *) p;
if (iosapic_init)
iosapic_init(iosapic->address, iosapic->irq_base, pcat_compat);
break;
case ACPI20_ENTRY_PLATFORM_INT_SOURCE:
printk("ACPI 2.0 MADT: PLATFORM INT SOURCE\n");
acpi20_platform(p, madt);
break;
case ACPI20_ENTRY_LOCAL_APIC:
printk("ACPI 2.0 MADT: LOCAL APIC entry\n"); break;
case ACPI20_ENTRY_IO_APIC:
printk("ACPI 2.0 MADT: IO APIC entry\n"); break;
case ACPI20_ENTRY_NMI_SOURCE:
printk("ACPI 2.0 MADT: NMI SOURCE entry\n"); break;
case ACPI20_ENTRY_LOCAL_APIC_NMI:
printk("ACPI 2.0 MADT: LOCAL APIC NMI entry\n"); break;
case ACPI20_ENTRY_INT_SRC_OVERRIDE:
break;
default:
printk("ACPI 2.0 MADT: unknown entry skip\n"); break;
break;
}
p += p[1];
}
return 0;
}
p = (char *) (madt + 1);
end = p + (madt->header.length - sizeof(acpi_madt_t));
while (p < end) {
switch (*p) {
case ACPI20_ENTRY_LOCAL_APIC:
if (lsapic) break;
printk("ACPI 2.0 MADT: LOCAL APIC entry\n");
/* parse local apic if there's no local Sapic */
break;
case ACPI20_ENTRY_IO_APIC:
if (iosapic) break;
printk("ACPI 2.0 MADT: IO APIC entry\n");
/* parse ioapic if there's no ioSapic */
break;
default:
break;
}
p += p[1];
}
static int __init
acpi_parse_madt (unsigned long phys_addr, unsigned long size)
{
int i = 0;
p = (char *) (madt + 1);
end = p + (madt->header.length - sizeof(acpi_madt_t));
if (!phys_addr || !size)
return -EINVAL;
while (p < end) {
switch (*p) {
case ACPI20_ENTRY_INT_SRC_OVERRIDE:
printk("ACPI 2.0 MADT: INT SOURCE Override\n");
acpi_legacy_irq(p);
break;
default:
break;
}
p += p[1];
acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
if (!acpi_madt) {
printk(KERN_WARNING PREFIX "Unable to map MADT\n");
return -ENODEV;
}
/* Make bootup pretty */
printk(" %d CPUs available, %d CPUs total\n",
available_cpus, total_cpus);
/* Initialize platform interrupt vector array */
for (i = 0; i < ACPI_MAX_PLATFORM_IRQS; i++)
platform_irq_list[i] = -1;
/* Get base address of IPI Message Block */
if (acpi_madt->lapic_address)
ipi_base_addr = (unsigned long)
ioremap(acpi_madt->lapic_address, 0);
printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
return 0;
}
int __init
acpi20_parse (acpi20_rsdp_t *rsdp20)
acpi_find_rsdp (unsigned long *rsdp_phys)
{
# ifdef CONFIG_ACPI
acpi_xsdt_t *xsdt;
acpi_desc_table_hdr_t *hdrp;
acpi_madt_t *madt = NULL;
int tables, i;
if (strncmp(rsdp20->signature, ACPI_RSDP_SIG, ACPI_RSDP_SIG_LEN)) {
printk("ACPI 2.0 RSDP signature incorrect!\n");
return 0;
} else {
printk("ACPI 2.0 Root System Description Ptr at 0x%lx\n",
(unsigned long)rsdp20);
}
if (!rsdp_phys)
return -EINVAL;
xsdt = __va(rsdp20->xsdt);
hdrp = &xsdt->header;
if (strncmp(hdrp->signature,
ACPI_XSDT_SIG, ACPI_XSDT_SIG_LEN)) {
printk("ACPI 2.0 XSDT signature incorrect. Trying RSDT\n");
/* RSDT parsing here */
if (efi.acpi20) {
(*rsdp_phys) = __pa(efi.acpi20);
return 0;
} else {
printk("ACPI 2.0 XSDT at 0x%lx (p=0x%lx)\n",
(unsigned long)xsdt, (unsigned long)rsdp20->xsdt);
}
else if (efi.acpi) {
printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
}
printk("ACPI 2.0: %.6s %.8s %d.%d\n",
hdrp->oem_id,
hdrp->oem_table_id,
hdrp->oem_revision >> 16,
hdrp->oem_revision & 0xffff);
return -ENODEV;
}
acpi_cf_init((void *)rsdp20);
tables =(hdrp->length -sizeof(acpi_desc_table_hdr_t))>>3;
#ifdef CONFIG_SERIAL_ACPI
for (i = 0; i < tables; i++) {
hdrp = (acpi_desc_table_hdr_t *) __va(readl_unaligned(&xsdt->entry_ptrs[i]));
printk(" :table %4.4s found\n", hdrp->signature);
#include <linux/acpi_serial.h>
/* Only interested in the MADT table for now ... */
if (strncmp(hdrp->signature,
ACPI_MADT_SIG, ACPI_MADT_SIG_LEN) != 0)
continue;
static int __init
acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
{
acpi_ser_t *spcr = NULL;
unsigned long global_int = 0;
/* Save MADT pointer for later */
madt = (acpi_madt_t *) hdrp;
acpi20_parse_madt(madt);
}
if (!phys_addr || !size)
return -EINVAL;
if (!iosapic_register_irq)
return -ENODEV;
#ifdef CONFIG_SERIAL_ACPI
/*
* Now we're interested in other tables. We want the iosapics already
* initialized, so we do it in a separate loop.
* ACPI is able to describe serial ports that live at non-standard
* memory addresses and use non-standard interrupts, either via
* direct SAPIC mappings or via PCI interrupts. We handle interrupt
* routing for SAPIC-based (non-PCI) devices here. Interrupt routing
* for PCI devices will be handled when processing the PCI Interrupt
* Routing Table (PRT).
*/
for (i = 0; i < tables; i++) {
hdrp = (acpi_desc_table_hdr_t *) __va(readl_unaligned(&xsdt->entry_ptrs[i]));
/*
* search for SPCR and DBGP table entries so we can enable
* non-pci interrupts to IO-SAPICs.
*/
if (!strncmp(hdrp->signature, ACPI_SPCRT_SIG, ACPI_SPCRT_SIG_LEN) ||
!strncmp(hdrp->signature, ACPI_DBGPT_SIG, ACPI_DBGPT_SIG_LEN))
{
acpi_ser_t *spcr = (void *)hdrp;
unsigned long global_int;
setup_serial_acpi(hdrp);
/*
* ACPI is able to describe serial ports that live at non-standard
* memory space addresses and use SAPIC interrupts. If not also
* PCI devices, there would be no interrupt vector information for
* them. This checks for and fixes that situation.
*/
if (spcr->length < sizeof(acpi_ser_t))
/* table is not long enough for full info, thus no int */
break;
/*
* If the device is not in PCI space, but uses a SAPIC interrupt,
* we need to program the SAPIC so that serial can autoprobe for
* the IA64 interrupt vector later on. If the device is in PCI
* space, it should already be setup via the PCI vectors
*/
if (spcr->base_addr.space_id != ACPI_SERIAL_PCICONF_SPACE &&
spcr->int_type == ACPI_SERIAL_INT_SAPIC)
{
u32 irq_base;
char *iosapic_address;
int vector;
/* We have a UART in memory space with a SAPIC interrupt */
global_int = ( (spcr->global_int[3] << 24)
| (spcr->global_int[2] << 16)
| (spcr->global_int[1] << 8)
| spcr->global_int[0]);
if (!iosapic_register_irq)
continue;
/* which iosapic does this IRQ belong to? */
if (acpi20_which_iosapic(global_int, madt, &irq_base,
&iosapic_address) == 0)
{
vector = iosapic_register_irq(global_int,
1, /* active high polarity */
1, /* edge triggered */
irq_base,
iosapic_address);
}
}
}
spcr = (acpi_ser_t *) __va(phys_addr);
if (!spcr) {
printk(KERN_WARNING PREFIX "Unable to map SPCR\n");
return -ENODEV;
}
#endif
acpi_cf_terminate();
# ifdef CONFIG_SMP
if (available_cpus == 0) {
printk("ACPI: Found 0 CPUS; assuming 1\n");
available_cpus = 1; /* We've got at least one of these, no? */
setup_serial_acpi(spcr);
if (spcr->length < sizeof(acpi_ser_t))
/* Table not long enough for full info, thus no interrupt */
return -ENODEV;
if ((spcr->base_addr.space_id != ACPI_SERIAL_PCICONF_SPACE) &&
(spcr->int_type == ACPI_SERIAL_INT_SAPIC))
{
u32 irq_base = 0;
char *iosapic_address = NULL;
int vector = 0;
/* We have a UART in memory space with an SAPIC interrupt */
global_int = ( (spcr->global_int[3] << 24) |
(spcr->global_int[2] << 16) |
(spcr->global_int[1] << 8) |
(spcr->global_int[0]) );
/* Which iosapic does this IRQ belong to? */
if (0 == acpi_find_iosapic(global_int, &irq_base, &iosapic_address)) {
vector = iosapic_register_irq (global_int, 1, 1,
irq_base, iosapic_address);
}
}
smp_boot_data.cpu_count = total_cpus;
# endif
# endif /* CONFIG_ACPI */
return 1;
return 0;
}
/*
* ACPI 1.0b with 0.71 IA64 extensions functions; should be removed once all
* platforms start supporting ACPI 2.0
*/
/*
* Identify usable CPU's and remember them for SMP bringup later.
*/
static void __init
acpi_lsapic (char *p)
#endif /*CONFIG_SERIAL_ACPI*/
int __init
acpi_boot_init (char *cmdline)
{
int add = 1;
int result = 0;
/* Initialize the ACPI boot-time table parser */
result = acpi_table_init(cmdline);
if (0 != result)
return result;
acpi_entry_lsapic_t *lsapic = (acpi_entry_lsapic_t *) p;
/*
* MADT
* ----
* Parse the Multiple APIC Description Table (MADT), if exists.
* Note that this table provides platform SMP configuration
* information -- the successor to MPS tables.
*/
if ((lsapic->flags & LSAPIC_PRESENT) == 0)
return;
result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
if (1 > result)
return result;
printk(" CPU %d (%.04x:%.04x): ", total_cpus, lsapic->eid, lsapic->id);
/* Local APIC */
if ((lsapic->flags & LSAPIC_ENABLED) == 0) {
printk("Disabled.\n");
add = 0;
} else if (lsapic->flags & LSAPIC_PERFORMANCE_RESTRICTED) {
printk("Performance Restricted; ignoring.\n");
add = 0;
result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
if (0 > result) {
printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
return result;
}
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[total_cpus] = -1;
#endif
if (add) {
printk("Available.\n");
available_cpus++;
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid;
#endif /* CONFIG_SMP */
result = acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic);
if (1 > result) {
printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries!\n");
return -ENODEV;
}
total_cpus++;
}
/*
* Info on platform interrupt sources: NMI, PMI, INIT, etc.
*/
static void __init
acpi_platform (char *p)
{
acpi_entry_platform_src_t *plat = (acpi_entry_platform_src_t *) p;
result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
if (0 > result) {
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
return result;
}
printk("PLATFORM: IOSAPIC %x -> Vector %x on CPU %.04u:%.04u\n",
plat->iosapic_vector, plat->global_vector, plat->eid, plat->id);
}
/* I/O APIC */
/*
* Parse the ACPI Multiple SAPIC Table
*/
static void __init
acpi_parse_msapic (acpi_sapic_t *msapic)
{
acpi_entry_iosapic_t *iosapic;
char *p, *end;
result = acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic);
if (1 > result) {
printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries!\n");
return ((result == 0) ? -ENODEV : result);
}
/* Base address of IPI Message Block */
ipi_base_addr = (unsigned long) ioremap(msapic->interrupt_block, 0);
/* System-Level Interrupt Routing */
p = (char *) (msapic + 1);
end = p + (msapic->header.length - sizeof(acpi_sapic_t));
result = acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src);
if (0 > result) {
printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
return result;
}
while (p < end) {
switch (*p) {
case ACPI_ENTRY_LOCAL_SAPIC:
acpi_lsapic(p);
break;
case ACPI_ENTRY_IO_SAPIC:
iosapic = (acpi_entry_iosapic_t *) p;
if (iosapic_init)
/*
* The ACPI I/O SAPIC table doesn't have a PCAT_COMPAT
* flag like the MADT table, but we can safely assume that
* ACPI 1.0b systems have a dual-8259 setup.
*/
iosapic_init(iosapic->address, iosapic->irq_base, 1);
break;
case ACPI_ENTRY_INT_SRC_OVERRIDE:
acpi_legacy_irq(p);
break;
case ACPI_ENTRY_PLATFORM_INT_SOURCE:
acpi_platform(p);
break;
default:
break;
}
result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
if (0 > result) {
printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
return result;
}
/* Move to next table entry. */
p += p[1];
result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
if (0 > result) {
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
return result;
}
#ifdef CONFIG_SERIAL_ACPI
/*
* TBD: Need phased approach to table parsing (only do those absolutely
* required during boot-up). Recommend expanding concept of fixed-
* feature devices (LDM) to include table-based devices such as
* serial ports, EC, SMBus, etc.
*/
acpi_table_parse(ACPI_SPCR, acpi_parse_spcr);
#endif /*CONFIG_SERIAL_ACPI*/
#ifdef CONFIG_SMP
if (available_cpus == 0) {
printk("ACPI: Found 0 CPUS; assuming 1\n");
available_cpus = 1; /* We've got at least one of these, no? */
}
smp_boot_data.cpu_count = total_cpus;
#endif
/* Make boot-up look pretty */
printk("%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
/* Make bootup pretty */
printk(" %d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
return 0;
}
/* --------------------------------------------------------------------------
PCI Interrupt Routing
-------------------------------------------------------------------------- */
int __init
acpi_parse (acpi_rsdp_t *rsdp)
acpi_get_prt (struct pci_vector_struct **vectors, int *count)
{
# ifdef CONFIG_ACPI
acpi_rsdt_t *rsdt;
acpi_desc_table_hdr_t *hdrp;
long tables, i;
struct pci_vector_struct *vector = NULL;
struct list_head *node = NULL;
struct acpi_prt_entry *entry = NULL;
int i = 0;
if (strncmp(rsdp->signature, ACPI_RSDP_SIG, ACPI_RSDP_SIG_LEN)) {
printk("Uh-oh, ACPI RSDP signature incorrect!\n");
return 0;
}
if (!vectors || !count)
return -EINVAL;
rsdt = __va(rsdp->rsdt);
if (strncmp(rsdt->header.signature, ACPI_RSDT_SIG, ACPI_RSDT_SIG_LEN)) {
printk("Uh-oh, ACPI RDST signature incorrect!\n");
return 0;
*vectors = NULL;
*count = 0;
if (acpi_prts.count < 0) {
printk(KERN_ERR PREFIX "No PCI IRQ routing entries\n");
return -ENODEV;
}
printk("ACPI: %.6s %.8s %d.%d\n", rsdt->header.oem_id, rsdt->header.oem_table_id,
rsdt->header.oem_revision >> 16, rsdt->header.oem_revision & 0xffff);
/* Allocate vectors */
acpi_cf_init(rsdp);
*vectors = kmalloc(sizeof(struct pci_vector_struct) * acpi_prts.count, GFP_KERNEL);
if (!(*vectors))
return -ENOMEM;
tables = (rsdt->header.length - sizeof(acpi_desc_table_hdr_t)) / 8;
for (i = 0; i < tables; i++) {
hdrp = (acpi_desc_table_hdr_t *) __va(rsdt->entry_ptrs[i]);
/* Convert PRT entries to IOSAPIC PCI vectors */
/* Only interested in the MSAPIC table for now ... */
if (strncmp(hdrp->signature, ACPI_SAPIC_SIG, ACPI_SAPIC_SIG_LEN) != 0)
continue;
vector = *vectors;
acpi_parse_msapic((acpi_sapic_t *) hdrp);
list_for_each(node, &acpi_prts.entries) {
entry = (struct acpi_prt_entry *)node;
vector[i].bus = (u16) entry->id.bus;
vector[i].pci_id = (u32) entry->id.dev << 16 | 0xffff;
vector[i].pin = (u8) entry->id.pin;
vector[i].irq = (u8) entry->source.index;
i++;
}
*count = acpi_prts.count;
return 0;
}
acpi_cf_terminate();
/* Assume IA64 always uses I/O SAPIC */
# ifdef CONFIG_SMP
if (available_cpus == 0) {
printk("ACPI: Found 0 CPUS; assuming 1\n");
available_cpus = 1; /* We've got at least one of these, no? */
}
smp_boot_data.cpu_count = total_cpus;
# endif
# endif /* CONFIG_ACPI */
return 1;
int __init
acpi_get_interrupt_model (int *type)
{
if (!type)
return -EINVAL;
*type = ACPI_INT_MODEL_IOSAPIC;
return 0;
}
#endif /* CONFIG_ACPI_BOOT */
......@@ -55,7 +55,7 @@ struct illegal_op_return
ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
{
unsigned long bundle[2];
unsigned long opcode, btype, qp, offset;
unsigned long opcode, btype, qp, offset, cpl;
unsigned long next_ip;
struct siginfo siginfo;
struct illegal_op_return rv;
......@@ -158,9 +158,9 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
* AR[PFS].pec = AR[EC]
* AR[PFS].ppl = PSR.cpl
*/
cpl = ia64_psr(regs)->cpl;
regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff)
| (ar_ec << 52)
| ((unsigned long) ia64_psr(regs)->cpl << 62));
| (ar_ec << 52) | (cpl << 62));
/*
* CFM.sof -= CFM.sol
......
......@@ -155,10 +155,10 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
case EFI_CONVENTIONAL_MEMORY:
if (!(md->attribute & EFI_MEMORY_WB))
continue;
if (md->phys_addr + (md->num_pages << 12) > mem_limit) {
if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
if (md->phys_addr > mem_limit)
continue;
md->num_pages = (mem_limit - md->phys_addr) >> 12;
md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT;
}
if (md->num_pages == 0) {
printk("efi_memmap_walk: ignoring empty region at 0x%lx",
......@@ -167,7 +167,7 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
}
curr.start = PAGE_OFFSET + md->phys_addr;
curr.end = curr.start + (md->num_pages << 12);
curr.end = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
if (!prev_valid) {
prev = curr;
......@@ -250,16 +250,17 @@ efi_map_pal_code (void)
* dedicated ITR for the PAL code.
*/
if ((vaddr & mask) == (KERNEL_START & mask)) {
printk(__FUNCTION__ ": no need to install ITR for PAL code\n");
printk("%s: no need to install ITR for PAL code\n", __FUNCTION__);
continue;
}
if (md->num_pages << 12 > IA64_GRANULE_SIZE)
if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
panic("Woah! PAL code size bigger than a granule!");
mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
printk("CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
smp_processor_id(), md->phys_addr, md->phys_addr + (md->num_pages << 12),
smp_processor_id(), md->phys_addr,
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
/*
......@@ -375,7 +376,8 @@ efi_init (void)
md = p;
printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
i, md->type, md->attribute, md->phys_addr,
md->phys_addr + (md->num_pages<<12) - 1, md->num_pages >> 8);
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
md->num_pages >> (20 - EFI_PAGE_SHIFT));
}
}
#endif
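For reference, the size printed in the hunk above converts EFI pages straight to megabytes. A minimal standalone sketch of that arithmetic, assuming the usual EFI_PAGE_SHIFT of 12 (4KB EFI pages):

#include <stdint.h>
#define EFI_PAGE_SHIFT 12	/* assumption: 4KB EFI pages */

static uint64_t efi_pages_to_mb(uint64_t num_pages)
{
	/* num_pages << EFI_PAGE_SHIFT is the size in bytes; shifting that right
	 * by 20 gives MB, which is the same as num_pages >> (20 - EFI_PAGE_SHIFT). */
	return num_pages >> (20 - EFI_PAGE_SHIFT);
}
/* e.g. efi_pages_to_mb(16384) == 64: 16384 pages of 4KB == 64MB */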
......@@ -482,8 +484,50 @@ efi_get_iobase (void)
return 0;
}
u32
efi_mem_type (u64 phys_addr)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
u64 efi_desc_size;
efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
efi_desc_size = ia64_boot_param->efi_memdesc_size;
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
if ((md->phys_addr <= phys_addr) && (phys_addr <=
(md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1)))
return md->type;
}
return 0;
}
u64
efi_mem_attributes (u64 phys_addr)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
u64 efi_desc_size;
efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
efi_desc_size = ia64_boot_param->efi_memdesc_size;
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
if ((md->phys_addr <= phys_addr) && (phys_addr <=
(md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1)))
return md->attribute;
}
return 0;
}
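A minimal usage sketch of the two helpers above, assuming the EFI_CONVENTIONAL_MEMORY and EFI_MEMORY_WB constants from the EFI header; the function name is illustrative, not part of this patch:

/* Sketch only: relies on the efi_mem_type()/efi_mem_attributes() helpers above. */
static int phys_addr_is_cacheable_ram (u64 phys_addr)
{
	if (efi_mem_type(phys_addr) != EFI_CONVENTIONAL_MEMORY)
		return 0;	/* not RAM (could be MMIO, PAL code, ...) */
	return (efi_mem_attributes(phys_addr) & EFI_MEMORY_WB) != 0;
}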
static void __exit
efivars_exit(void)
efivars_exit (void)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry(efi_dir->name, NULL);
......
......@@ -36,6 +36,7 @@
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
......@@ -214,62 +215,80 @@ GLOBAL_ENTRY(save_switch_stack)
.save @priunat,r17
mov r17=ar.unat // preserve caller's
.body
adds r3=80,sp
#ifdef CONFIG_ITANIUM
adds r2=16+128,sp
adds r3=16+64,sp
adds r14=SW(R4)+16,sp
;;
st8.spill [r14]=r4,16 // spill r4
lfetch.fault.excl.nt1 [r3],128
mov ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
adds r2=16+128,sp
;;
lfetch.fault.excl.nt1 [r2],128
lfetch.fault.excl.nt1 [r3],128
adds r14=SW(R4)+16,sp
;;
lfetch.fault.excl [r2]
lfetch.fault.excl [r3]
adds r15=SW(R5)+16,sp
#else
add r2=16+3*128,sp
add r3=16,sp
add r14=SW(R4)+16,sp
;;
st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0
lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010
;;
mov r18=ar.fpsr // preserve fpsr
mov r19=ar.rnat
add r2=SW(F2)+16,sp // r2 = &sw->f2
.mem.offset 0,0; st8.spill [r14]=r4,16 // spill r4
.mem.offset 8,0; st8.spill [r15]=r5,16 // spill r5
add r3=SW(F3)+16,sp // r3 = &sw->f3
lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090
lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190
;;
lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110
lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210
adds r15=SW(R5)+16,sp
#endif
;;
st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5
mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
add r2=SW(F2)+16,sp // r2 = &sw->f2
;;
st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6
mov.m r18=ar.fpsr // preserve fpsr
add r3=SW(F3)+16,sp // r3 = &sw->f3
;;
stf.spill [r2]=f2,32
stf.spill [r3]=f3,32
mov.m r19=ar.rnat
mov r21=b0
.mem.offset 0,0; st8.spill [r14]=r6,16 // spill r6
.mem.offset 8,0; st8.spill [r15]=r7,16 // spill r7
stf.spill [r3]=f3,32
st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7
mov r22=b1
;;
// since we're done with the spills, read and save ar.unat:
mov r29=ar.unat // M-unit
mov r20=ar.bspstore // M-unit
mov.m r29=ar.unat
mov.m r20=ar.bspstore
mov r23=b2
stf.spill [r2]=f4,32
stf.spill [r3]=f5,32
mov r24=b3
;;
st8 [r14]=r21,16 // save b0
st8 [r15]=r22,16 // save b1
st8 [r14]=r21,SW(B1)-SW(B0) // save b0
st8 [r15]=r23,SW(B3)-SW(B2) // save b2
mov r25=b4
stf.spill [r2]=f10,32
stf.spill [r3]=f11,32
mov r26=b5
;;
st8 [r14]=r23,16 // save b2
st8 [r15]=r24,16 // save b3
st8 [r14]=r22,SW(B4)-SW(B1) // save b1
st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3
mov r21=ar.lc // I-unit
stf.spill [r2]=f12,32
stf.spill [r3]=f13,32
;;
st8 [r14]=r25,16 // save b4
st8 [r15]=r26,16 // save b5
st8 [r14]=r25,SW(B5)-SW(B4) // save b4
st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
stf.spill [r2]=f14,32
stf.spill [r3]=f15,32
;;
st8 [r14]=r16 // save ar.pfs
st8 [r15]=r21 // save ar.lc
st8 [r14]=r26 // save b5
st8 [r15]=r21 // save ar.lc
stf.spill [r2]=f16,32
stf.spill [r3]=f17,32
;;
......@@ -284,26 +303,26 @@ GLOBAL_ENTRY(save_switch_stack)
;;
stf.spill [r2]=f24,32
stf.spill [r3]=f25,32
add r14=SW(CALLER_UNAT)+16,sp
;;
stf.spill [r2]=f26,32
stf.spill [r3]=f27,32
add r15=SW(AR_FPSR)+16,sp
;;
stf.spill [r2]=f28,32
stf.spill [r3]=f29,32
st8 [r14]=r17 // save caller_unat
st8 [r15]=r18 // save fpsr
mov r21=pr
;;
stf.spill [r2]=f30,(SW(AR_UNAT)-SW(F30))
stf.spill [r3]=f31,(SW(AR_RNAT)-SW(F31))
stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
stf.spill [r3]=f31,SW(PR)-SW(F31)
add r14=SW(CALLER_UNAT)+16,sp
;;
st8 [r2]=r29,16 // save ar.unat
st8 [r3]=r19,16 // save ar.rnat
st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
mov r21=pr
;;
st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
st8 [r3]=r21 // save predicate registers
;;
st8 [r2]=r20 // save ar.bspstore
st8 [r3]=r21 // save predicate registers
st8 [r2]=r20 // save ar.bspstore
st8 [r14]=r18 // save fpsr
mov ar.rsc=3 // put RSE back into eager mode, pl 0
br.cond.sptk.many b7
END(save_switch_stack)
......@@ -647,23 +666,38 @@ dont_preserve_current_frame:
/*
* To prevent leaking bits between the kernel and user-space,
* we must clear the stacked registers in the "invalid" partition here.
* Not pretty, but at least it's fast (3.34 registers/cycle).
* Architecturally, this loop could go at 4.67 registers/cycle, but that would
* oversubscribe Itanium.
* Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
* 5 registers/cycle on McKinley).
*/
# define pRecurse p6
# define pReturn p7
#ifdef CONFIG_ITANIUM
# define Nregs 10
#else
# define Nregs 14
#endif
alloc loc0=ar.pfs,2,Nregs-2,2,0
shr.u loc1=r18,9 // RNaTslots <= dirtySize / (64*8) + 1
sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
;;
#if 1
.align 32 // see comment below about gas bug...
#endif
mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
shladd in0=loc1,3,r17
mov in1=0
#if 0
// gas-2.12.90 is unable to generate a stop bit after .align, which is bad,
// because alloc must be at the beginning of an insn-group.
.align 32
#else
nop 0
nop 0
nop 0
#endif
;;
// .align 32 // gas-2.11.90 is unable to generate a stop bit after .align
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
// cycle 0
{ .mii
alloc loc0=ar.pfs,2,Nregs-2,2,0
......@@ -692,9 +726,31 @@ rse_clear_invalid:
mov loc7=0
(pReturn) br.ret.sptk.many b6
}
#else /* !CONFIG_ITANIUM */
alloc loc0=ar.pfs,2,Nregs-2,2,0
cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
add out0=-Nregs*8,in0
add out1=1,in1 // increment recursion count
mov loc1=0
mov loc2=0
;;
mov loc3=0
mov loc4=0
mov loc9=0
mov loc5=0
mov loc6=0
(pRecurse) br.call.sptk.many b6=rse_clear_invalid
;;
mov loc7=0
mov loc8=0
cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
mov loc10=0
mov loc11=0
(pReturn) br.ret.sptk.many b6
#endif /* !CONFIG_ITANIUM */
# undef pRecurse
# undef pReturn
;;
alloc r17=ar.pfs,0,0,0,0 // drop current register frame
;;
loadrs
......@@ -1087,7 +1143,11 @@ sys_call_table:
data8 sys_sched_get_priority_min
data8 sys_sched_rr_get_interval
data8 sys_nanosleep
#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
data8 sys_nfsservctl
#else
data8 sys_ni_syscall
#endif
data8 sys_prctl // 1170
data8 sys_getpagesize
data8 sys_mmap2
......@@ -1147,14 +1207,10 @@ sys_call_table:
data8 sys_removexattr
data8 sys_lremovexattr
data8 sys_fremovexattr
#if 0
data8 sys_tkill
#else
data8 ia64_ni_syscall
#endif
data8 ia64_ni_syscall // 1230
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 sys_futex // 1230
data8 sys_sched_setaffinity
data8 sys_sched_getaffinity
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall // 1235
......
......@@ -560,137 +560,114 @@ GLOBAL_ENTRY(__ia64_load_fpu)
END(__ia64_load_fpu)
GLOBAL_ENTRY(__ia64_init_fpu)
alloc r2=ar.pfs,0,0,0,0
stf.spill [sp]=f0
mov f32=f0
;;
ldf.fill f33=[sp]
ldf.fill f34=[sp]
mov f35=f0
;;
ldf.fill f36=[sp]
ldf.fill f37=[sp]
mov f38=f0
;;
ldf.fill f39=[sp]
ldf.fill f40=[sp]
mov f41=f0
;;
ldf.fill f42=[sp]
ldf.fill f43=[sp]
mov f44=f0
;;
ldf.fill f45=[sp]
ldf.fill f46=[sp]
mov f47=f0
;;
ldf.fill f48=[sp]
ldf.fill f49=[sp]
mov f50=f0
;;
ldf.fill f51=[sp]
ldf.fill f52=[sp]
mov f53=f0
;;
ldf.fill f54=[sp]
ldf.fill f55=[sp]
mov f56=f0
;;
ldf.fill f57=[sp]
ldf.fill f58=[sp]
mov f59=f0
;;
ldf.fill f60=[sp]
ldf.fill f61=[sp]
mov f62=f0
;;
ldf.fill f63=[sp]
ldf.fill f64=[sp]
mov f65=f0
;;
ldf.fill f66=[sp]
ldf.fill f67=[sp]
mov f68=f0
;;
ldf.fill f69=[sp]
ldf.fill f70=[sp]
mov f71=f0
;;
ldf.fill f72=[sp]
ldf.fill f73=[sp]
mov f74=f0
;;
ldf.fill f75=[sp]
ldf.fill f76=[sp]
mov f77=f0
;;
ldf.fill f78=[sp]
ldf.fill f79=[sp]
mov f80=f0
;;
ldf.fill f81=[sp]
ldf.fill f82=[sp]
mov f83=f0
;;
ldf.fill f84=[sp]
ldf.fill f85=[sp]
mov f86=f0
;;
ldf.fill f87=[sp]
ldf.fill f88=[sp]
mov f89=f0
;;
ldf.fill f90=[sp]
ldf.fill f91=[sp]
mov f92=f0
;;
ldf.fill f93=[sp]
ldf.fill f94=[sp]
mov f95=f0
;;
ldf.fill f96=[sp]
ldf.fill f97=[sp]
mov f98=f0
;;
ldf.fill f99=[sp]
ldf.fill f100=[sp]
mov f101=f0
;;
ldf.fill f102=[sp]
ldf.fill f103=[sp]
mov f104=f0
;;
ldf.fill f105=[sp]
ldf.fill f106=[sp]
mov f107=f0
;;
ldf.fill f108=[sp]
ldf.fill f109=[sp]
mov f110=f0
;;
ldf.fill f111=[sp]
ldf.fill f112=[sp]
mov f113=f0
;;
ldf.fill f114=[sp]
ldf.fill f115=[sp]
mov f116=f0
;;
ldf.fill f117=[sp]
ldf.fill f118=[sp]
mov f119=f0
;;
ldf.fill f120=[sp]
ldf.fill f121=[sp]
mov f122=f0
;;
ldf.fill f123=[sp]
ldf.fill f124=[sp]
mov f125=f0
stf.spill [sp]=f0 // M3
mov f32=f0 // F
nop.b 0
ldfps f33,f34=[sp] // M0
ldfps f35,f36=[sp] // M1
mov f37=f0 // F
;;
ldf.fill f126=[sp]
mov f127=f0
br.ret.sptk.many rp
setf.s f38=r0 // M2
setf.s f39=r0 // M3
mov f40=f0 // F
ldfps f41,f42=[sp] // M0
ldfps f43,f44=[sp] // M1
mov f45=f0 // F
setf.s f46=r0 // M2
setf.s f47=r0 // M3
mov f48=f0 // F
ldfps f49,f50=[sp] // M0
ldfps f51,f52=[sp] // M1
mov f53=f0 // F
setf.s f54=r0 // M2
setf.s f55=r0 // M3
mov f56=f0 // F
ldfps f57,f58=[sp] // M0
ldfps f59,f60=[sp] // M1
mov f61=f0 // F
setf.s f62=r0 // M2
setf.s f63=r0 // M3
mov f64=f0 // F
ldfps f65,f66=[sp] // M0
ldfps f67,f68=[sp] // M1
mov f69=f0 // F
setf.s f70=r0 // M2
setf.s f71=r0 // M3
mov f72=f0 // F
ldfps f73,f74=[sp] // M0
ldfps f75,f76=[sp] // M1
mov f77=f0 // F
setf.s f78=r0 // M2
setf.s f79=r0 // M3
mov f80=f0 // F
ldfps f81,f82=[sp] // M0
ldfps f83,f84=[sp] // M1
mov f85=f0 // F
setf.s f86=r0 // M2
setf.s f87=r0 // M3
mov f88=f0 // F
/*
* When the instructions are cached, it would be faster to initialize
* the remaining registers with plain mov instructions (F-unit).
* This gets the time down to ~29 cycles. However, this would use up
* 33 bundles, whereas continuing with the above pattern yields
* 10 bundles and ~30 cycles.
*/
ldfps f89,f90=[sp] // M0
ldfps f91,f92=[sp] // M1
mov f93=f0 // F
setf.s f94=r0 // M2
setf.s f95=r0 // M3
mov f96=f0 // F
ldfps f97,f98=[sp] // M0
ldfps f99,f100=[sp] // M1
mov f101=f0 // F
setf.s f102=r0 // M2
setf.s f103=r0 // M3
mov f104=f0 // F
ldfps f105,f106=[sp] // M0
ldfps f107,f108=[sp] // M1
mov f109=f0 // F
setf.s f110=r0 // M2
setf.s f111=r0 // M3
mov f112=f0 // F
ldfps f113,f114=[sp] // M0
ldfps f115,f116=[sp] // M1
mov f117=f0 // F
setf.s f118=r0 // M2
setf.s f119=r0 // M3
mov f120=f0 // F
ldfps f121,f122=[sp] // M0
ldfps f123,f124=[sp] // M1
mov f125=f0 // F
setf.s f126=r0 // M2
setf.s f127=r0 // M3
br.ret.sptk.many rp // F
END(__ia64_init_fpu)
/*
......
......@@ -6,7 +6,11 @@
#include <linux/module.h>
#include <linux/string.h>
EXPORT_SYMBOL_NOVERS(memset);
#undef memset
extern void *memset (void *, int, size_t);
EXPORT_SYMBOL_NOVERS(memset); /* gcc generates direct calls to memset()... */
EXPORT_SYMBOL_NOVERS(__memset_generic);
EXPORT_SYMBOL_NOVERS(__bzero);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL_NOVERS(memcpy);
......@@ -67,7 +71,7 @@ EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(last_cli_ip);
#endif
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
EXPORT_SYMBOL(flush_tlb_range);
......@@ -147,3 +151,10 @@ EXPORT_SYMBOL(efi);
#include <linux/proc_fs.h>
extern struct proc_dir_entry *efi_dir;
EXPORT_SYMBOL(efi_dir);
#include <asm/machvec.h>
#ifdef CONFIG_IA64_GENERIC
EXPORT_SYMBOL(ia64_mv);
#endif
EXPORT_SYMBOL(machvec_noop);
......@@ -22,6 +22,7 @@
* 02/01/07 E. Focht <efocht@ess.nec.de> Redirectable interrupt vectors in
* iosapic_set_affinity(), initializations for
* /proc/irq/#/smp_affinity
* 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing.
*/
/*
* Here is what the interrupt logic between a PCI device and the CPU looks like:
......@@ -56,9 +57,8 @@
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <asm/acpi-ext.h>
#include <asm/acpikcfg.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
......@@ -92,11 +92,37 @@ static struct iosapic_irq {
unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
} iosapic_irq[IA64_NUM_VECTORS];
static struct iosapic {
char *addr; /* base address of IOSAPIC */
unsigned char pcat_compat; /* 8259 compatibility flag */
unsigned char base_irq; /* first irq assigned to this IOSAPIC */
unsigned short max_pin; /* max input pin supported in this IOSAPIC */
} iosapic_lists[256] __initdata;
static int num_iosapic = 0;
/*
* Find an IOSAPIC associated with an IRQ
*/
static inline int __init
find_iosapic (unsigned int irq)
{
int i;
for (i = 0; i < num_iosapic; i++) {
if ((irq - iosapic_lists[i].base_irq) < iosapic_lists[i].max_pin)
return i;
}
return -1;
}
/*
* Translate IOSAPIC irq number to the corresponding IA-64 interrupt vector. If no
* entry exists, return -1.
*/
int
static int
iosapic_irq_to_vector (int irq)
{
int vector;
......@@ -479,7 +505,7 @@ iosapic_register_platform_irq (u32 int_type, u32 global_vector,
int vector;
switch (int_type) {
case ACPI20_ENTRY_PIS_PMI:
case ACPI_INTERRUPT_PMI:
vector = iosapic_vector;
/*
* since PMI vector is alloc'd by FW(ACPI) not by kernel,
......@@ -488,15 +514,15 @@ iosapic_register_platform_irq (u32 int_type, u32 global_vector,
iosapic_reassign_vector(vector);
delivery = IOSAPIC_PMI;
break;
case ACPI20_ENTRY_PIS_CPEI:
vector = IA64_PCE_VECTOR;
delivery = IOSAPIC_LOWEST_PRIORITY;
break;
case ACPI20_ENTRY_PIS_INIT:
case ACPI_INTERRUPT_INIT:
vector = ia64_alloc_irq();
delivery = IOSAPIC_INIT;
break;
default:
case ACPI_INTERRUPT_CPEI:
vector = IA64_PCE_VECTOR;
delivery = IOSAPIC_LOWEST_PRIORITY;
break;
default:
printk("iosapic_register_platform_irq(): invalid int type\n");
return -1;
}
......@@ -542,31 +568,41 @@ iosapic_register_legacy_irq (unsigned long irq,
void __init
iosapic_init (unsigned long phys_addr, unsigned int base_irq, int pcat_compat)
{
int i, irq, max_pin, vector, pin;
int irq, max_pin, vector, pin;
unsigned int ver;
char *addr;
static int first_time = 1;
if (first_time) {
first_time = 0;
for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
iosapic_irq[vector].pin = -1; /* mark as unused */
}
if (pcat_compat) {
/*
* Fetch the PCI interrupt routing table:
* Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
* enabled.
*/
acpi_cf_get_pci_vectors(&pci_irq.route, &pci_irq.num_routes);
printk("%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__);
outb(0xff, 0xA1);
outb(0xff, 0x21);
}
addr = ioremap(phys_addr, 0);
ver = iosapic_version(addr);
max_pin = (ver >> 16) & 0xff;
iosapic_lists[num_iosapic].addr = addr;
iosapic_lists[num_iosapic].pcat_compat = pcat_compat;
iosapic_lists[num_iosapic].base_irq = base_irq;
iosapic_lists[num_iosapic].max_pin = max_pin;
num_iosapic++;
printk("IOSAPIC: version %x.%x, address 0x%lx, IRQs 0x%02x-0x%02x\n",
(ver & 0xf0) >> 4, (ver & 0x0f), phys_addr, base_irq, base_irq + max_pin);
if ((base_irq == 0) && pcat_compat)
if ((base_irq == 0) && pcat_compat) {
/*
* Map the legacy ISA devices into the IOSAPIC data. Some of these may
* get reprogrammed later on with data from the ACPI Interrupt Source
......@@ -590,11 +626,37 @@ iosapic_init (unsigned long phys_addr, unsigned int base_irq, int pcat_compat)
/* program the IOSAPIC routing table: */
set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
}
}
}
void __init
iosapic_init_pci_irq (void)
{
int i, index, vector, pin;
int base_irq, max_pin, pcat_compat;
unsigned int irq;
char *addr;
if (0 != acpi_get_prt(&pci_irq.route, &pci_irq.num_routes))
return;
for (i = 0; i < pci_irq.num_routes; i++) {
irq = pci_irq.route[i].irq;
if ((irq < (int)base_irq) || (irq > (int)(base_irq + max_pin)))
index = find_iosapic(irq);
if (index < 0) {
printk("PCI: IRQ %u has no IOSAPIC mapping\n", irq);
continue;
}
addr = iosapic_lists[index].addr;
base_irq = iosapic_lists[index].base_irq;
max_pin = iosapic_lists[index].max_pin;
pcat_compat = iosapic_lists[index].pcat_compat;
pin = irq - base_irq;
if ((unsigned) pin > max_pin)
/* the interrupt route is for another controller... */
continue;
......@@ -607,18 +669,13 @@ iosapic_init (unsigned long phys_addr, unsigned int base_irq, int pcat_compat)
vector = ia64_alloc_irq();
}
register_irq(irq, vector, irq - base_irq,
/* IOSAPIC_POL_LOW, IOSAPIC_LEVEL */
IOSAPIC_LOWEST_PRIORITY, 0, 0, base_irq, addr);
register_irq(irq, vector, pin, IOSAPIC_LOWEST_PRIORITY, 0, 0, base_irq, addr);
# ifdef DEBUG_IRQ_ROUTING
#ifdef DEBUG_IRQ_ROUTING
printk("PCI: (B%d,I%d,P%d) -> IOSAPIC irq 0x%02x -> vector 0x%02x\n",
pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin,
iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector);
# endif
/* program the IOSAPIC routing table: */
set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
#endif
}
}
......@@ -631,6 +688,11 @@ iosapic_pci_fixup (int phase)
struct hw_interrupt_type *irq_type;
irq_desc_t *idesc;
if (phase == 0) {
iosapic_init_pci_irq();
return;
}
if (phase != 1)
return;
......@@ -670,10 +732,10 @@ iosapic_pci_fixup (int phase)
irq_type = &irq_type_iosapic_level;
idesc = irq_desc(vector);
if (idesc->handler != irq_type){
if (idesc->handler != irq_type) {
if (idesc->handler != &no_irq_type)
printk("iosapic_pci_fixup: changing vector 0x%02x from "
"%s to %s\n", vector,
printk("iosapic_pci_fixup: changing vector 0x%02x "
"from %s to %s\n", vector,
idesc->handler->typename,
irq_type->typename);
idesc->handler = irq_type;
......
......@@ -68,6 +68,23 @@
irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned =
{ [0 ... NR_IRQS-1] = { IRQ_DISABLED, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
#ifdef CONFIG_IA64_GENERIC
struct irq_desc * __ia64_irq_desc (unsigned int irq)
{
return _irq_desc + irq;
}
ia64_vector __ia64_irq_to_vector (unsigned int irq)
{
return (ia64_vector) irq;
}
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
return (unsigned int) vec;
}
#endif
static void register_irq_proc (unsigned int irq);
/*
......
......@@ -127,7 +127,7 @@ ENTRY(vhpt_miss)
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
srlz.d // ensure "rsm psr.dt" has taken effect
(p6) movl r19=__pa(SWAPPER_PGD_ADDR) // region 5 is rooted at swapper_pg_dir
(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
......@@ -413,7 +413,7 @@ ENTRY(nested_dtlb_miss)
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
srlz.d
(p6) movl r19=__pa(SWAPPER_PGD_ADDR) // region 5 is rooted at swapper_pg_dir
(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
......
......@@ -41,6 +41,7 @@
#include <linux/irq.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <asm/machvec.h>
#include <asm/page.h>
......@@ -51,7 +52,6 @@
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/acpi-ext.h>
#undef MCA_PRT_XTRA_DATA
......@@ -353,7 +353,9 @@ static int
verify_guid (efi_guid_t *test, efi_guid_t *target)
{
int rc;
#ifdef IA64_MCA_DEBUG_INFO
char out[40];
#endif
if ((rc = efi_guidcmp(*test, *target))) {
IA64_MCA_DEBUG(KERN_DEBUG
......@@ -497,7 +499,7 @@ ia64_mca_init(void)
{
irq_desc_t *desc;
unsigned int irq;
int cpev = acpi_request_vector(ACPI20_ENTRY_PIS_CPEI);
int cpev = acpi_request_vector(ACPI_INTERRUPT_CPEI);
if (cpev >= 0) {
for (irq = 0; irq < NR_IRQS; ++irq)
......
#include <linux/config.h>
#include <asm/cache.h>
#include "entry.h"
/*
......@@ -28,18 +30,19 @@
* on interrupts.
*/
#define MINSTATE_START_SAVE_MIN_VIRT \
dep r1=-1,r1,61,3; /* r1 = current (virtual) */ \
(pUser) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
dep r1=-1,r1,61,3; /* r1 = current (virtual) */ \
;; \
(pUser) mov.m rARRNAT=ar.rnat; \
(pUser) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pUser) mov rARRNAT=ar.rnat; \
(pKern) mov r1=sp; /* get sp */ \
;; \
(pUser) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUser) lfetch.fault.excl.nt1 [rKRBS]; \
(pUser) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
(pUser) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
;; \
(pKern) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(pUser) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
(pKern) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUser) mov r18=ar.bsp; \
(pUser) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
......@@ -125,52 +128,58 @@
;; \
SAVE_IFS; \
MINSTATE_START_SAVE_MIN \
add r17=L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ \
;; \
mov r16=r1; /* initialize first base pointer */ \
adds r17=8,r1; /* initialize second base pointer */ \
st8 [r1]=rCRIPSR; /* save cr.ipsr */ \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
add r16=16,r1; /* initialize first base pointer */ \
;; \
st8 [r16]=rCRIPSR,16; /* save cr.ipsr */ \
st8 [r17]=rCRIIP,16; /* save cr.iip */ \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
;; \
lfetch.fault.excl.nt1 [r17]; \
adds r17=8,r1; /* initialize second base pointer */ \
(pKern) mov r18=r0; /* make sure r18 isn't NaT */ \
;; \
st8 [r17]=rCRIIP,16; /* save cr.iip */ \
st8 [r16]=rCRIFS,16; /* save cr.ifs */ \
st8 [r17]=rARUNAT,16; /* save ar.unat */ \
(pUser) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
;; \
st8 [r17]=rARUNAT,16; /* save ar.unat */ \
st8 [r16]=rARPFS,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r17]=rARRSC,16; /* save ar.rsc */ \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT \
;; /* avoid RAW on r16 & r17 */ \
(pKern) adds r16=16,r16; /* skip over ar_rnat field */ \
(pKern) adds r17=16,r17; /* skip over ar_bspstore field */ \
(pUser) st8 [r16]=rARRNAT,16; /* save ar.rnat */ \
(pKern) adds r16=16,r16; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUser) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */ \
;; \
st8 [r16]=rARPR,16; /* save predicates */ \
st8 [r17]=rB6,16; /* save b6 */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
(pKern) adds r17=16,r17; /* skip over ar_bspstore field */ \
;; \
st8 [r17]=rB6,16; /* save b6 */ \
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
st8.spill [r17]=rR1,16; /* save original r1 */ \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT \
;; \
.mem.offset 8,0; st8.spill [r17]=rR1,16; /* save original r1 */ \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
;; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
.mem.offset 0,0; st8.spill [r16]=r12,16; \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
.mem.offset 0,0; st8.spill [r16]=r12,16; \
.mem.offset 8,0; st8.spill [r17]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r13,16; \
.mem.offset 0,0; st8.spill [r16]=r14,16; \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r14,16; \
.mem.offset 8,0; st8.spill [r17]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r15,16; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
dep r14=-1,r0,61,3; \
;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
.mem.offset 0,0; st8.spill [r16]=r10,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,16; \
.mem.offset 8,0; st8.spill [r17]=r11,16; \
.mem.offset 8,0; st8.spill [r17]=r11,16; \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
;; \
EXTRA; \
......@@ -190,10 +199,12 @@
*/
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
;; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
;; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
;; \
mov r16=ar.ccv; /* M-unit */ \
movl r18=FPSR_DEFAULT /* L-unit */ \
......@@ -201,30 +212,29 @@
mov r17=ar.fpsr; /* M-unit */ \
mov ar.fpsr=r18; /* M-unit */ \
;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
mov r18=b0; \
;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
;; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
;; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,16; \
;; \
.mem.offset 8,0; st8.spill [r3]=r31,16; \
st8 [r2]=r16,16; /* ar.ccv */ \
st8 [r3]=r17,16; /* ar.fpsr */ \
;; \
st8 [r3]=r17,16; /* ar.fpsr */ \
st8 [r2]=r18,16; /* b0 */ \
;; \
st8 [r3]=r19,16+8; /* b7 */ \
;; \
stf.spill [r2]=f6,32; \
......
......@@ -42,101 +42,183 @@
extern void ia64_mca_check_errors( void );
#endif
struct pci_fixup pcibios_fixups[1];
struct pci_ops *pci_root_ops;
int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
/*
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
* Low-level SAL-based PCI configuration access functions. Note that SAL
* calls are already serialized (via sal_lock), so we don't need another
* synchronization mechanism here. Not using segment number (yet).
*/
static spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;
struct pci_fixup pcibios_fixups[] = {
{ 0 }
};
#define PCI_SAL_ADDRESS(bus, dev, fn, reg) \
((u64)(bus << 16) | (u64)(dev << 11) | (u64)(fn << 8) | (u64)(reg))
/* Macro to build a PCI configuration address to be passed as a parameter to SAL. */
static int
pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
{
int result = 0;
u64 data = 0;
#define PCI_CONFIG_ADDRESS(dev, where) \
(((u64) dev->bus->number << 16) | ((u64) (dev->devfn & 0xff) << 8) | (where & 0xff))
if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL;
result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, &data);
*value = (u32) data;
return result;
}
static int
pci_conf_read_config_byte(struct pci_dev *dev, int where, u8 *value)
pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
{
s64 status;
u64 lval;
if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL;
status = ia64_sal_pci_config_read(PCI_CONFIG_ADDRESS(dev, where), 1, &lval);
*value = lval;
return status;
return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, value);
}
static int
pci_conf_read_config_word(struct pci_dev *dev, int where, u16 *value)
pci_sal_read_config_byte (struct pci_dev *dev, int where, u8 *value)
{
s64 status;
u64 lval;
int result = 0;
u32 data = 0;
if (!value)
return -EINVAL;
status = ia64_sal_pci_config_read(PCI_CONFIG_ADDRESS(dev, where), 2, &lval);
*value = lval;
return status;
result = pci_sal_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 1, &data);
*value = (u8) data;
return result;
}
static int
pci_conf_read_config_dword(struct pci_dev *dev, int where, u32 *value)
pci_sal_read_config_word (struct pci_dev *dev, int where, u16 *value)
{
s64 status;
u64 lval;
int result = 0;
u32 data = 0;
if (!value)
return -EINVAL;
result = pci_sal_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 2, &data);
*value = (u16) data;
status = ia64_sal_pci_config_read(PCI_CONFIG_ADDRESS(dev, where), 4, &lval);
*value = lval;
return status;
return result;
}
static int
pci_conf_write_config_byte (struct pci_dev *dev, int where, u8 value)
pci_sal_read_config_dword (struct pci_dev *dev, int where, u32 *value)
{
return ia64_sal_pci_config_write(PCI_CONFIG_ADDRESS(dev, where), 1, value);
if (!value)
return -EINVAL;
return pci_sal_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 4, value);
}
static int
pci_conf_write_config_word (struct pci_dev *dev, int where, u16 value)
pci_sal_write_config_byte (struct pci_dev *dev, int where, u8 value)
{
return ia64_sal_pci_config_write(PCI_CONFIG_ADDRESS(dev, where), 2, value);
return pci_sal_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 1, value);
}
static int
pci_conf_write_config_dword (struct pci_dev *dev, int where, u32 value)
pci_sal_write_config_word (struct pci_dev *dev, int where, u16 value)
{
return ia64_sal_pci_config_write(PCI_CONFIG_ADDRESS(dev, where), 4, value);
return pci_sal_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 2, value);
}
struct pci_ops pci_conf = {
pci_conf_read_config_byte,
pci_conf_read_config_word,
pci_conf_read_config_dword,
pci_conf_write_config_byte,
pci_conf_write_config_word,
pci_conf_write_config_dword
static int
pci_sal_write_config_dword (struct pci_dev *dev, int where, u32 value)
{
return pci_sal_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), where, 4, value);
}
struct pci_ops pci_sal_ops = {
pci_sal_read_config_byte,
pci_sal_read_config_word,
pci_sal_read_config_dword,
pci_sal_write_config_byte,
pci_sal_write_config_word,
pci_sal_write_config_dword
};
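As a worked example of the PCI_SAL_ADDRESS() encoding these helpers pass to SAL (the bus/dev/fn/reg values below are hypothetical):

/* PCI_SAL_ADDRESS(bus, dev, fn, reg) packs bus<<16 | dev<<11 | fn<<8 | reg.
 * For bus 0x40, dev 0x1f, fn 3, reg 0x04:
 *   0x40 << 16 = 0x400000
 *   0x1f << 11 = 0x00f800
 *   0x03 <<  8 = 0x000300
 *   0x04       = 0x000004
 *   address    = 0x40fb04
 */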
/*
* Initialization. Uses the SAL interface
*/
struct pci_bus *
pcibios_scan_root(int seg, int bus)
{
struct list_head *list = NULL;
struct pci_bus *pci_bus = NULL;
list_for_each(list, &pci_root_buses) {
pci_bus = pci_bus_b(list);
if (pci_bus->number == bus) {
/* Already scanned */
printk("PCI: Bus (%02x:%02x) already probed\n", seg, bus);
return pci_bus;
}
}
printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus);
return pci_scan_bus(bus, pci_root_ops, NULL);
}
void __init
pcibios_config_init (void)
{
if (pci_root_ops)
return;
printk("PCI: Using SAL to access configuration space\n");
pci_root_ops = &pci_sal_ops;
pci_config_read = pci_sal_read;
pci_config_write = pci_sal_write;
return;
}
void __init
pcibios_init (void)
{
# define PCI_BUSES_TO_SCAN 255
int i;
int i = 0;
#ifdef CONFIG_IA64_MCA
ia64_mca_check_errors(); /* For post-failure MCA error logging */
#endif
platform_pci_fixup(0); /* phase 0 initialization (before PCI bus has been scanned) */
pcibios_config_init();
platform_pci_fixup(0); /* phase 0 fixups (before buses scanned) */
printk("PCI: Probing PCI hardware\n");
for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
pci_scan_bus(i, &pci_conf, NULL);
pci_scan_bus(i, pci_root_ops, NULL);
platform_pci_fixup(1); /* phase 1 fixups (after buses scanned) */
platform_pci_fixup(1); /* phase 1 initialization (after PCI bus has been scanned) */
return;
}
......@@ -186,7 +268,12 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
int
pcibios_enable_device (struct pci_dev *dev)
{
if (!dev)
return -EINVAL;
/* Not needed, since we enable all devices at startup. */
printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
return 0;
}
......@@ -233,8 +320,7 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_page_range(vma, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
......
......@@ -24,7 +24,7 @@
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/perfmon.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/uaccess.h>
......@@ -145,7 +145,7 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
/*
* We use this if we don't have any better idle routine..
*/
static void
void
default_idle (void)
{
/* may want to do PAL_LIGHT_HALT here... */
......@@ -660,7 +660,7 @@ dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
tsk = __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
tsk = (void *) __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
if (!tsk)
return NULL;
......
/*
* IA-64 semaphore implementation (derived from x86 version).
*
* Copyright (C) 1999-2000 Hewlett-Packard Co
* Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
......@@ -25,6 +25,7 @@
*/
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
/*
......
......@@ -19,6 +19,7 @@
#include <linux/config.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
......@@ -30,7 +31,6 @@
#include <linux/threads.h>
#include <linux/tty.h>
#include <asm/acpi-ext.h>
#include <asm/ia32.h>
#include <asm/page.h>
#include <asm/machvec.h>
......@@ -54,7 +54,10 @@
extern char _end;
#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
#endif
struct cpuinfo_ia64 cpu_info __per_cpu_data;
unsigned long ia64_phys_stacked_size_p8;
......@@ -64,6 +67,8 @@ struct screen_info screen_info;
unsigned long ia64_iobase; /* virtual address for I/O accesses */
unsigned char aux_device_present = 0xaa; /* XXX remove this when legacy I/O is gone */
#define COMMAND_LINE_SIZE 512
char saved_command_line[COMMAND_LINE_SIZE]; /* used in proc filesystem */
......@@ -282,6 +287,7 @@ void __init
setup_arch (char **cmdline_p)
{
extern unsigned long ia64_iobase;
unsigned long phys_iobase;
unw_init();
......@@ -314,24 +320,23 @@ setup_arch (char **cmdline_p)
#endif
/*
* Set `iobase' to the appropriate address in region 6
* (uncached access range)
* Set `iobase' to the appropriate address in region 6 (uncached access range).
*
* The EFI memory map is the "prefered" location to get the I/O port
* space base, rather the relying on AR.KR0. This should become more
* clear in future SAL specs. We'll fall back to getting it out of
* AR.KR0 if no appropriate entry is found in the memory map.
* The EFI memory map is the "preferred" location to get the I/O port space base,
* rather than relying on AR.KR0. This should become more clear in future SAL
* specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
* found in the memory map.
*/
ia64_iobase = efi_get_iobase();
if (ia64_iobase)
phys_iobase = efi_get_iobase();
if (phys_iobase)
/* set AR.KR0 since this is all we use it for anyway */
ia64_set_kr(IA64_KR_IO_BASE, ia64_iobase);
ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
else {
ia64_iobase = ia64_get_kr(IA64_KR_IO_BASE);
phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
printk("No I/O port range found in EFI memory map, falling back to AR.KR0\n");
printk("I/O port base = 0x%lx\n", ia64_iobase);
printk("I/O port base = 0x%lx\n", phys_iobase);
}
ia64_iobase = __IA64_UNCACHED_OFFSET | (ia64_iobase & ~PAGE_OFFSET);
ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
#ifdef CONFIG_SMP
cpu_physical_id(0) = hard_smp_processor_id();
......@@ -339,20 +344,23 @@ setup_arch (char **cmdline_p)
cpu_init(); /* initialize the bootstrap CPU */
if (efi.acpi20) {
/* Parse the ACPI 2.0 tables */
acpi20_parse(efi.acpi20);
} else if (efi.acpi) {
/* Parse the ACPI tables */
acpi_parse(efi.acpi);
}
#ifdef CONFIG_ACPI_BOOT
acpi_boot_init(*cmdline_p);
#endif
#ifdef CONFIG_VT
# if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
# elif defined(CONFIG_DUMMY_CONSOLE)
# if defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
/*
* Non-legacy systems may route legacy VGA MMIO range to system
* memory. vga_con probes the MMIO hole, so memory looks like
* a VGA device to it. The EFI memory map can tell us if it's
* memory so we can avoid this problem.
*/
if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
conswitchp = &vga_con;
# endif
#endif
#ifdef CONFIG_IA64_MCA
......@@ -522,20 +530,25 @@ setup_per_cpu_areas (void)
void
cpu_init (void)
{
extern char __per_cpu_start[], __phys_per_cpu_start[], __per_cpu_end[];
extern char __per_cpu_start[], __phys_per_cpu_start[];
extern void __init ia64_mmu_init (void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *my_cpu_info;
void *my_cpu_data;
#ifdef CONFIG_SMP
extern char __per_cpu_end[];
int cpu = smp_processor_id();
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
#else
my_cpu_data = __phys_per_cpu_start;
#endif
my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
/*
......
......@@ -530,8 +530,8 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
info.si_signo = signr;
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = current->p_pptr->pid;
info.si_uid = current->p_pptr->uid;
info.si_pid = current->parent->pid;
info.si_uid = current->parent->uid;
}
/* If the (new) signal is now blocked, requeue it. */
......@@ -570,7 +570,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
case SIGSTOP:
current->state = TASK_STOPPED;
current->exit_code = signr;
sig = current->p_pptr->sig;
sig = current->parent->sig;
if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
......
......@@ -48,6 +48,7 @@
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
......@@ -236,7 +237,7 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
int cpus = 1;
if (cpuid == smp_processor_id()) {
printk(__FUNCTION__" trying to call self\n");
printk("%s: trying to call self\n", __FUNCTION__);
return -EBUSY;
}
......
......@@ -68,6 +68,7 @@ static volatile unsigned long go[SLAVE + 1];
extern void __init calibrate_delay(void);
extern void start_ap(void);
extern unsigned long ia64_iobase;
int cpucount;
task_t *task_for_booting_cpu;
......@@ -345,6 +346,11 @@ smp_callin (void)
*/
ia64_init_itm();
/*
* Set I/O port base per CPU
*/
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifdef CONFIG_IA64_MCA
ia64_mca_cmc_vector_setup(); /* Setup vector on AP & enable */
ia64_mca_check_errors(); /* For post-failure MCA error logging */
......
......@@ -47,7 +47,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (rgn_offset(addr) + len > RGN_MAP_LIMIT) /* no risk of overflow here... */
if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) /* no risk of overflow here... */
return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start)
return addr;
......@@ -126,7 +126,7 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
}
/* Check against unimplemented/unmapped addresses: */
if ((newbrk - oldbrk) > RGN_MAP_LIMIT || rgn_offset(newbrk) > RGN_MAP_LIMIT)
if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
goto out;
/* Check against rlimit.. */
......@@ -206,7 +206,7 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
* or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
* (for some integer n <= 61) and len > 0.
*/
roff = rgn_offset(addr);
roff = REGION_OFFSET(addr);
if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
addr = -EINVAL;
goto out;
......
......@@ -214,7 +214,8 @@ ia64_ni_syscall (unsigned long arg0, unsigned long arg1, unsigned long arg2, uns
{
struct pt_regs *regs = (struct pt_regs *) &stack;
printk("<sc%ld(%lx,%lx,%lx,%lx)>\n", regs->r15, arg0, arg1, arg2, arg3);
printk("%s(%d): <sc%ld(%lx,%lx,%lx,%lx)>\n", current->comm, current->pid,
regs->r15, arg0, arg1, arg2, arg3);
return -ENOSYS;
}
......
......@@ -103,7 +103,7 @@ struct unw_state_record {
unsigned int in_body : 1; /* are we inside a body (as opposed to a prologue)? */
unsigned long flags; /* see UNW_FLAG_* in unwind.h */
u8 *imask; /* imask of of spill_mask record or NULL */
u8 *imask; /* imask of spill_mask record or NULL */
unsigned long pr_val; /* predicate values */
unsigned long pr_mask; /* predicate mask */
long spill_offset; /* psp-relative offset for spill base */
......
......@@ -16,6 +16,9 @@ obj-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
flush.o io.o do_csum.o \
memcpy.o memset.o strlen.o swiotlb.o
obj-$(CONFIG_ITANIUM) += copy_page.o
obj-$(CONFIG_MCKINLEY) += copy_page_mck.o
IGNORE_FLAGS_OBJS = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
......
/*
* McKinley-optimized version of copy_page().
*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger <davidm@hpl.hp.com>
*
* Inputs:
* in0: address of target page
* in1: address of source page
* Output:
* no return value
*
* General idea:
* - use regular loads and stores to prefetch data to avoid consuming M-slot just for
* lfetches => good for in-cache performance
* - avoid l2 bank-conflicts by not storing into the same 16-byte bank within a single
* cycle
*
* Principle of operation:
* First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes.
* To avoid secondary misses in L2, we prefetch both source and destination with a line-size
* of 128 bytes. When both of these lines are in the L2 and the first half of the
* source line is in L1, we start copying the remaining words. The second half of the
* source line is prefetched in an earlier iteration, so that by the time we start
* accessing it, it's also present in the L1.
*
* We use a software-pipelined loop to control the overall operation. The pipeline
* has 2*PREFETCH_DIST+K stages. The first PREFETCH_DIST stages are used for prefetching
* source cache-lines. The second PREFETCH_DIST stages are used for prefetching destination
* cache-lines, the last K stages are used to copy the cache-line words not copied by
* the prefetches. The four relevant points in the pipelined loop are called A, B, C, D:
* p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line
* should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought
* into L1D and p[D] is TRUE if a cacheline needs to be copied.
*
* This all sounds very complicated, but thanks to the modulo-scheduled loop support,
* the resulting code is very regular and quite easy to follow (once you get the idea).
*
* As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented
* as the separate .prefetch_loop. Logically, this loop performs exactly like the
* main-loop (.line_copy), but has all known-to-be-predicated-off instructions removed,
* so that each loop iteration is faster (again, good for cached case).
*
* When reading the code, it helps to keep the following picture in mind:
*
* word 0 word 1
* +------+------+---
* | v[x] | t1 | ^
* | t2 | t3 | |
* | t4 | t5 | |
* | t6 | t7 | | 128 bytes
* | n[y] | t9 | | (L2 cache line)
* | t10 | t11 | |
* | t12 | t13 | |
* | t14 | t15 | v
* +------+------+---
*
* Here, v[x] is copied by the (memory) prefetch. n[y] is loaded at p[C]
* to fetch the second-half of the L2 cache line into L1, and the tX words are copied in
* an order that avoids bank conflicts.
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
#define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
#define src0 r2
#define src1 r3
#define dst0 r9
#define dst1 r10
#define src_pre_mem r11
#define dst_pre_mem r14
#define src_pre_l2 r15
#define dst_pre_l2 r16
#define t1 r17
#define t2 r18
#define t3 r19
#define t4 r20
#define t5 t1 // alias!
#define t6 t2 // alias!
#define t7 t3 // alias!
#define t9 t5 // alias!
#define t10 t4 // alias!
#define t11 t7 // alias!
#define t12 t6 // alias!
#define t14 t10 // alias!
#define t13 r21
#define t15 r22
#define saved_lc r23
#define saved_pr r24
#define A 0
#define B (PREFETCH_DIST)
#define C (B + PREFETCH_DIST)
#define D (C + 3)
#define N (D + 1)
#define Nrot ((N + 7) & ~7)
GLOBAL_ENTRY(copy_page)
.prologue
alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot
.rotr v[2*PREFETCH_DIST], n[D-C+1]
.rotp p[N]
.save ar.lc, saved_lc
mov saved_lc = ar.lc
.save pr, saved_pr
mov saved_pr = pr
.body
mov src_pre_mem = in1
mov pr.rot = 0x10000
mov ar.ec = 1 // special unrolled loop
mov dst_pre_mem = in0
mov ar.lc = 2*PREFETCH_DIST - 1
add src_pre_l2 = 8*8, in1
add dst_pre_l2 = 8*8, in0
add src0 = 8, in1 // first t1 src
add src1 = 3*8, in1 // first t3 src
add dst0 = 8, in0 // first t1 dst
add dst1 = 3*8, in0 // first t3 dst
mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1
nop.m 0
nop.i 0
;;
// same as .line_copy loop, but with all predicated-off instructions removed:
.prefetch_loop:
(p[A]) ld8 v[A] = [src_pre_mem], 128 // M0
(p[B]) st8 [dst_pre_mem] = v[B], 128 // M2
br.ctop.sptk .prefetch_loop
;;
cmp.eq p16, p0 = r0, r0 // reset p16 to 1 (br.ctop cleared it to zero)
mov ar.lc = t1 // with 64KB pages, t1 is too big to fit in 8 bits!
mov ar.ec = N // # of stages in pipeline
;;
.line_copy:
(p[D]) ld8 t2 = [src0], 3*8 // M0
(p[D]) ld8 t4 = [src1], 3*8 // M1
(p[B]) st8 [dst_pre_mem] = v[B], 128 // M2 prefetch dst from memory
(p[D]) st8 [dst_pre_l2] = n[D-C], 128 // M3 prefetch dst from L2
;;
(p[A]) ld8 v[A] = [src_pre_mem], 128 // M0 prefetch src from memory
(p[C]) ld8 n[0] = [src_pre_l2], 128 // M1 prefetch src from L2
(p[D]) st8 [dst0] = t1, 8 // M2
(p[D]) st8 [dst1] = t3, 8 // M3
;;
(p[D]) ld8 t5 = [src0], 8
(p[D]) ld8 t7 = [src1], 3*8
(p[D]) st8 [dst0] = t2, 3*8
(p[D]) st8 [dst1] = t4, 3*8
;;
(p[D]) ld8 t6 = [src0], 3*8
(p[D]) ld8 t10 = [src1], 8
(p[D]) st8 [dst0] = t5, 8
(p[D]) st8 [dst1] = t7, 3*8
;;
(p[D]) ld8 t9 = [src0], 3*8
(p[D]) ld8 t11 = [src1], 3*8
(p[D]) st8 [dst0] = t6, 3*8
(p[D]) st8 [dst1] = t10, 8
;;
(p[D]) ld8 t12 = [src0], 8
(p[D]) ld8 t14 = [src1], 8
(p[D]) st8 [dst0] = t9, 3*8
(p[D]) st8 [dst1] = t11, 3*8
;;
(p[D]) ld8 t13 = [src0], 4*8
(p[D]) ld8 t15 = [src1], 4*8
(p[D]) st8 [dst0] = t12, 8
(p[D]) st8 [dst1] = t14, 8
;;
(p[D-1])ld8 t1 = [src0], 8
(p[D-1])ld8 t3 = [src1], 8
(p[D]) st8 [dst0] = t13, 4*8
(p[D]) st8 [dst1] = t15, 4*8
br.ctop.sptk .line_copy
;;
mov ar.lc = saved_lc
mov pr = saved_pr, -1
br.ret.sptk.many rp
END(copy_page)
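A rough C-level sketch of what the comment block above describes: keep the source and destination lines PREFETCH_DIST lines ahead of the copy point while copying one 128-byte L2 line per iteration. The real routine overlaps all of this in a single modulo-scheduled loop; the names and the __builtin_prefetch() hints below are illustrative only, not kernel code:

#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 16384	/* assumption: 16KB pages, as in the defconfig */
#define LINE		 128	/* L2 line size prefetched by the loop */
#define PREFETCH_DIST	 8	/* lines kept in flight ahead of the copy */

static void copy_page_sketch (void *dst, const void *src)
{
	const char *s = src;
	char *d = dst;
	long i;

	for (i = 0; i < SKETCH_PAGE_SIZE; i += LINE) {
		if (i + PREFETCH_DIST * LINE < SKETCH_PAGE_SIZE) {
			__builtin_prefetch(s + i + PREFETCH_DIST * LINE, 0);	/* source line */
			__builtin_prefetch(d + i + PREFETCH_DIST * LINE, 1);	/* destination line */
		}
		memcpy(d + i, s + i, LINE);	/* copy one 128-byte L2 line */
	}
}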
......@@ -8,9 +8,11 @@
* in0: address of buffer to checksum (char *)
* in1: length of the buffer (int)
*
* Copyright (C) 1999, 2001 Hewlett-Packard Co
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999, 2001-2002 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* 02/04/08 David Mosberger <davidm@hpl.hp.com>
* More cleanup and tuning.
* 01/04/18 Jun Nakajima <jun.nakajima@intel.com>
* Clean up and optimize the software pipeline, loading two
* back-to-back 8-byte words per loop. Clean up the initialization
......@@ -71,8 +73,6 @@
// calculating the Internet checksum.
//
// NOT YET DONE:
// - use the lfetch instruction to augment the chances of the data being in
// the cache when we need it.
// - Maybe another algorithm which would take care of the folding at the
// end in a different manner
// - Work with people more knowledgeable than me on the network stack
......@@ -102,10 +102,6 @@
#define buf in0
#define len in1
#ifndef CONFIG_IA64_LOAD_LATENCY
#define CONFIG_IA64_LOAD_LATENCY 2
#endif
#define LOAD_LATENCY 2 // XXX fix me
#if (LOAD_LATENCY != 1) && (LOAD_LATENCY != 2)
......@@ -122,45 +118,46 @@ GLOBAL_ENTRY(do_csum)
.prologue
.save ar.pfs, saved_pfs
alloc saved_pfs=ar.pfs,2,16,1,16
.rotr word1[4], word2[4],result1[4],result2[4]
.rotp p[PIPE_DEPTH]
.rotr word1[4], word2[4],result1[LOAD_LATENCY+2],result2[LOAD_LATENCY+2]
.rotp p[PIPE_DEPTH], pC1[2], pC2[2]
mov ret0=r0 // in case we have zero length
cmp.lt p0,p6=r0,len // check for zero length or negative (32bit len)
;; // avoid WAW on CFM
mov tmp3=0x7 // a temporary mask/value
;;
add tmp1=buf,len // last byte's address
(p6) br.ret.spnt.many rp // return if true (hope we can avoid that)
.save pr, saved_pr
mov saved_pr=pr // preserve predicates (rotation)
(p6) br.ret.spnt.many rp // return if zero or negative length
and firstoff=7,buf // how many bytes off for first1 element
tbit.nz p15,p0=buf,0 // is buf an odd address ?
mov hmask=-1 // initialize head mask
;;
andcm first1=buf,tmp3 // 8byte aligned down address of first1 element
tbit.nz p15,p0=buf,0 // is buf an odd address?
and first1=-8,buf // 8-byte align down address of first1 element
and firstoff=7,buf // how many bytes off for first1 element
mov tmask=-1 // initialize tail mask
adds tmp2=-1,tmp1 // last-1
;;
adds tmp2=-1,tmp1 // last-1
and lastoff=7,tmp1 // how many bytes off for last element
andcm last=tmp2,tmp3 // address of word containing last byte
.save pr, saved_pr
mov saved_pr=pr // preserve predicates (rotation)
;;
sub tmp1=8,lastoff // complement to lastoff
and last=-8,tmp2 // address of word containing last byte
;;
sub tmp3=last,first1 // tmp3=distance from first1 to last
.save ar.lc, saved_lc
mov saved_lc=ar.lc // save lc
cmp.eq p8,p9=last,first1 // everything fits in one word ?
sub tmp1=8,lastoff // complement to lastoff
ld8 firstval=[first1],8 // load,ahead of time, "first1" word
ld8 firstval=[first1],8 // load, ahead of time, "first1" word
and tmp1=7, tmp1 // make sure that if tmp1==8 -> tmp1=0
shl tmp2=firstoff,3 // number of bits
;;
and tmp1=7, tmp1 // make sure that if tmp1==8 -> tmp1=0
(p9) ld8 lastval=[last] // load,ahead of time, "last" word, if needed
(p9) ld8 lastval=[last] // load, ahead of time, "last" word, if needed
shl tmp1=tmp1,3 // number of bits
(p9) adds tmp3=-8,tmp3 // effectively loaded
;;
(p8) mov lastval=r0 // we don't need lastval if first1==last
shl tmp1=tmp1,3 // number of bits
shl hmask=hmask,tmp2 // build head mask, mask off [0,first1off[
;;
shr.u tmask=tmask,tmp1 // build tail mask, mask off ]8,lastoff]
.save ar.lc, saved_lc
mov saved_lc=ar.lc // save lc
;;
.body
#define count tmp3
......@@ -171,8 +168,8 @@ GLOBAL_ENTRY(do_csum)
;;
// If count is odd, finish this 8-byte word so that we can
// load two back-to-back 8-byte words per loop thereafter.
tbit.nz p10,p11=count,0 // if (count is odd)
and word1[0]=firstval,hmask // and mask it as appropriate
tbit.nz p10,p11=count,0 // if (count is odd)
;;
(p8) mov result1[0]=word1[0]
(p9) add result1[0]=word1[0],word2[0]
......@@ -181,9 +178,8 @@ GLOBAL_ENTRY(do_csum)
;;
(p6) adds result1[0]=1,result1[0]
(p8) br.cond.dptk .do_csum_exit // if (within an 8-byte word)
;;
(p11) br.cond.dptk .do_csum16 // if (count is even)
;;
// Here count is odd.
ld8 word1[1]=[first1],8 // load an 8-byte word
cmp.eq p9,p10=1,count // if (count == 1)
......@@ -194,11 +190,9 @@ GLOBAL_ENTRY(do_csum)
cmp.ltu p6,p0=result1[0],word1[1]
;;
(p6) adds result1[0]=1,result1[0]
;;
(p9) br.cond.sptk .do_csum_exit // if (count == 1) exit
// Fall through to calculate the checksum, feeding result1[0] as
// the initial value in result1[0].
;;
//
// Calculate the checksum loading two 8-byte words per loop.
//
......@@ -207,45 +201,36 @@ GLOBAL_ENTRY(do_csum)
shr.u count=count,1 // we do 16 bytes per loop
;;
cmp.eq p9,p10=r0,count // if (count == 0)
adds count=-1,count
brp.loop.imp 1f,2f
;;
adds count=-1,count
mov ar.ec=PIPE_DEPTH
;;
mov ar.lc=count // set lc
;;
// result1[0] must be initialized in advance.
mov result2[0]=r0
;;
mov pr.rot=1<<16
;;
mov carry1=r0
mov carry2=r0
;;
add first2=8,first1
;;
(p9) br.cond.sptk .do_csum_exit
;;
nop.m 0
nop.i 0
;;
.align 32
1:
(ELD_1) cmp.ltu p31,p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1]
(p32) adds carry1=1,carry1
(ELD_1) cmp.ltu p47,p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1]
(p48) adds carry2=1,carry2
(ELD_1) cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1]
(pC1[1])adds carry1=1,carry1
(ELD_1) cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1]
(pC2[1])adds carry2=1,carry2
(ELD) add result1[LOAD_LATENCY-1]=result1[LOAD_LATENCY],word1[LOAD_LATENCY]
(ELD) add result2[LOAD_LATENCY-1]=result2[LOAD_LATENCY],word2[LOAD_LATENCY]
2:
(p16) ld8 word1[0]=[first1],16
(p16) ld8 word2[0]=[first2],16
[2:]
(p[0]) ld8 word1[0]=[first1],16
(p[0]) ld8 word2[0]=[first2],16
br.ctop.sptk 1b
;;
// Since len is a 32-bit value, carry cannot be larger than
// a 64-bit value.
(p32) adds carry1=1,carry1 // since we miss the last one
(p48) adds carry2=1,carry2
// Since len is a 32-bit value, carry cannot be larger than a 64-bit value.
(pC1[1])adds carry1=1,carry1 // since we miss the last one
(pC2[1])adds carry2=1,carry2
;;
add result1[LOAD_LATENCY+1]=result1[LOAD_LATENCY+1],carry1
add result2[LOAD_LATENCY+1]=result2[LOAD_LATENCY+1],carry2
......@@ -263,18 +248,15 @@ GLOBAL_ENTRY(do_csum)
(p6) adds result1[0]=1,result1[0]
;;
.do_csum_exit:
movl tmp3=0xffffffff
;;
// XXX Fixme
//
// now fold 64 into 16 bits taking care of carry
// that's not very good because it has lots of sequentiality
//
and tmp1=result1[0],tmp3
mov tmp3=0xffff
zxt4 tmp1=result1[0]
shr.u tmp2=result1[0],32
;;
add result1[0]=tmp1,tmp2
shr.u tmp3=tmp3,16
;;
and tmp1=result1[0],tmp3
shr.u tmp2=result1[0],16
......
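For reference, a minimal C sketch (not part of the patch) of the 64-to-16-bit fold that the rewritten zxt4/shr.u/add tail performs, with the end-around carry absorbed at each step:

#include <stdint.h>

/* Illustrative only: fold a 64-bit ones'-complement accumulator down to
 * 16 bits, propagating the carry out of each fold back into the sum. */
static uint16_t fold_csum64(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 64 -> 33 bits  */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* absorb carry   */
	sum = (sum & 0xffff) + (sum >> 16);		/* 32 -> 17 bits  */
	sum = (sum & 0xffff) + (sum >> 16);		/* absorb carry   */
	return (uint16_t) sum;
}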
......@@ -9,8 +9,8 @@
* in1: byte value to use for storing
* in2: length of the buffer
*
* Copyright (C) 1999, 2001 Hewlett-Packard Co
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999, 2001, 2002 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#include <asm/asmmacro.h>
......@@ -30,7 +30,19 @@
#define saved_lc r20
#define tmp r21
GLOBAL_ENTRY(memset)
GLOBAL_ENTRY(__bzero)
.prologue
.save ar.pfs, saved_pfs
alloc saved_pfs=ar.pfs,0,0,3,0
mov out2=out1
mov out1=0
/* FALL THROUGH (explicit NOPs so that next alloc is preceded by stop bit!) */
nop.m 0
nop.f 0
nop.i 0
;;
END(__bzero)
GLOBAL_ENTRY(__memset_generic)
.prologue
.save ar.pfs, saved_pfs
alloc saved_pfs=ar.pfs,3,0,0,0 // cnt is sink here
......@@ -105,4 +117,7 @@ GLOBAL_ENTRY(memset)
;;
(p6) st1 [buf]=val // only 1 byte left
br.ret.sptk.many rp
END(memset)
END(__memset_generic)
.global memset
memset = __memset_generic // alias needed for gcc
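In C terms, the new __bzero entry point just rearranges its arguments and falls into the generic memset body. A rough user-space equivalent, with hypothetical names:

#include <stddef.h>

/* Illustrative C equivalent of the assembly fall-through above: the length
 * moves to the third argument slot, the fill value becomes 0, and control
 * continues into the generic memset body. */
static void *my_memset_generic(void *s, int c, size_t n)
{
	unsigned char *p = s;

	while (n--)
		*p++ = (unsigned char) c;
	return s;
}

static void my_bzero(void *s, size_t n)
{
	my_memset_generic(s, 0, n);
}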
......@@ -10,6 +10,7 @@
* unnecessary i-cache flushing.
*/
#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
......@@ -24,9 +25,6 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#define ALIGN(val, align) ((unsigned long) \
(((unsigned long) (val) + ((align) - 1)) & ~((align) - 1)))
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
......@@ -276,8 +274,11 @@ swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_ha
int gfp = GFP_ATOMIC;
void *ret;
if (!hwdev || hwdev->dma_mask <= 0xffffffff)
gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
/*
* Alloc_consistent() is defined to return memory < 4GB, no matter what the DMA
* mask says.
*/
gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
ret = (void *)__get_free_pages(gfp, get_order(size));
if (!ret)
return NULL;
......
......@@ -120,15 +120,15 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (rgn_index(address) != rgn_index(vma->vm_start)
|| rgn_offset(address) >= RGN_MAP_LIMIT)
if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
|| REGION_OFFSET(address) >= RGN_MAP_LIMIT)
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
} else {
vma = prev_vma;
if (rgn_index(address) != rgn_index(vma->vm_start)
|| rgn_offset(address) >= RGN_MAP_LIMIT)
if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
|| REGION_OFFSET(address) >= RGN_MAP_LIMIT)
goto bad_area;
if (expand_backing_store(vma, address))
goto bad_area;
......
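The REGION_NUMBER()/REGION_OFFSET() macros replace the removed rgn_index()/rgn_offset() helpers. Assuming they reduce to the usual IA-64 address split (3-bit region number in bits 63-61, 61-bit offset below), the test above is roughly:

#include <stdint.h>

/* Assumed behaviour of the macros used in the fault path above; these are
 * hypothetical stand-ins, not the kernel definitions. */
static inline uint64_t region_number(uint64_t addr)
{
	return addr >> 61;			/* which of the 8 regions */
}

static inline uint64_t region_offset(uint64_t addr)
{
	return addr & ((1ULL << 61) - 1);	/* offset within the region */
}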
......@@ -40,10 +40,10 @@ static unsigned long totalram_pages;
static int pgt_cache_water[2] = { 25, 50 };
int
void
check_pgt_cache (void)
{
int low, high, freed = 0;
int low, high;
low = pgt_cache_water[0];
high = pgt_cache_water[1];
......@@ -51,12 +51,11 @@ check_pgt_cache (void)
if (pgtable_cache_size > high) {
do {
if (pgd_quicklist)
free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
free_page((unsigned long)pgd_alloc_one_fast(0));
if (pmd_quicklist)
free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
free_page((unsigned long)pmd_alloc_one_fast(0, 0));
} while (pgtable_cache_size > low);
}
return freed;
}
/*
......@@ -348,8 +347,6 @@ paging_init (void)
{
unsigned long max_dma, zones_size[MAX_NR_ZONES];
clear_page((void *) ZERO_PAGE_ADDR);
/* initialize mem_map[] */
memset(zones_size, 0, sizeof(zones_size));
......
......@@ -16,10 +16,11 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>
#define SUPPORTED_PGBITS ( \
1 << _PAGE_SIZE_256M | \
......@@ -79,7 +80,7 @@ wrap_mmu_context (struct mm_struct *mm)
flush_tlb_all();
}
static inline void
void
ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
{
static spinlock_t ptcg_lock = SPIN_LOCK_UNLOCKED;
......
......@@ -28,11 +28,6 @@ SECTIONS
.text : AT(ADDR(.text) - PAGE_OFFSET)
{
*(.text.ivt)
/* these are not really text pages, but they need to be page aligned: */
*(__special_page_section)
__start_gate_section = .;
*(.text.gate)
__stop_gate_section = .;
*(.text)
}
.text2 : AT(ADDR(.text2) - PAGE_OFFSET)
......@@ -120,6 +115,13 @@ SECTIONS
.data.init_task : AT(ADDR(.data.init_task) - PAGE_OFFSET)
{ *(.data.init_task) }
.data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET)
{ *(__special_page_section)
__start_gate_section = .;
*(.text.gate)
__stop_gate_section = .;
}
. = ALIGN(SMP_CACHE_BYTES);
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
{ *(.data.cacheline_aligned) }
......
......@@ -931,6 +931,9 @@
121a NetServer SMIC Controller
121b NetServer Legacy COM Port Decoder
121c NetServer PCI COM Port Decoder
1229 zx1 System Bus Adapter
122a zx1 I/O Controller
122e zx1 Local Bus Adapter
2910 E2910A
2925 E2925A
103e Solliday Engineering
......
#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H
/*
* Advanced Configuration and Power Interface
* Based on 'ACPI Specification 1.0b' February 2, 1999
* and 'IA-64 Extensions to the ACPI Specification' Rev 0.6
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000 Intel Corp.
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* ACPI 2.0 specification
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#pragma pack(1)
#define ACPI_RSDP_SIG "RSD PTR " /* Trailing space required */
#define ACPI_RSDP_SIG_LEN 8
typedef struct {
char signature[8];
u8 checksum;
char oem_id[6];
u8 revision;
u32 rsdt;
u32 length;
struct acpi_xsdt *xsdt;
u8 ext_checksum;
u8 reserved[3];
} acpi20_rsdp_t;
typedef struct {
char signature[4];
u32 length;
u8 revision;
u8 checksum;
char oem_id[6];
char oem_table_id[8];
u32 oem_revision;
u32 creator_id;
u32 creator_revision;
} acpi_desc_table_hdr_t;
#define ACPI_RSDT_SIG "RSDT"
#define ACPI_RSDT_SIG_LEN 4
typedef struct {
acpi_desc_table_hdr_t header;
u8 reserved[4];
u32 entry_ptrs[1]; /* Not really . . . */
} acpi20_rsdt_t;
#define ACPI_XSDT_SIG "XSDT"
#define ACPI_XSDT_SIG_LEN 4
typedef struct acpi_xsdt {
acpi_desc_table_hdr_t header;
unsigned long entry_ptrs[1]; /* Not really . . . */
} acpi_xsdt_t;
/* Common structures for ACPI 2.0 and 0.71 */
typedef struct acpi_entry_iosapic {
u8 type;
u8 length;
u8 id;
u8 reserved;
u32 irq_base; /* start of IRQ's this IOSAPIC is responsible for. */
unsigned long address; /* Address of this IOSAPIC */
} acpi_entry_iosapic_t;
/* Local SAPIC flags */
#define LSAPIC_ENABLED (1<<0)
#define LSAPIC_PERFORMANCE_RESTRICTED (1<<1)
#define LSAPIC_PRESENT (1<<2)
/* Defines legacy IRQ->pin mapping */
typedef struct {
u8 type;
u8 length;
u8 bus; /* Constant 0 == ISA */
u8 isa_irq; /* ISA IRQ # */
u32 pin; /* called vector in spec; really IOSAPIC pin number */
u16 flags; /* Edge/Level trigger & High/Low active */
} acpi_entry_int_override_t;
#define INT_OVERRIDE_ACTIVE_LOW 0x03
#define INT_OVERRIDE_LEVEL_TRIGGER 0x0d
/* IA64 ext 0.71 */
typedef struct {
char signature[8];
u8 checksum;
char oem_id[6];
char reserved; /* Must be 0 */
struct acpi_rsdt *rsdt;
} acpi_rsdp_t;
typedef struct acpi_rsdt {
acpi_desc_table_hdr_t header;
u8 reserved[4];
unsigned long entry_ptrs[1]; /* Not really . . . */
} acpi_rsdt_t;
#define ACPI_SAPIC_SIG "SPIC"
#define ACPI_SAPIC_SIG_LEN 4
typedef struct {
acpi_desc_table_hdr_t header;
u8 reserved[4];
unsigned long interrupt_block;
} acpi_sapic_t;
/* SAPIC structure types */
#define ACPI_ENTRY_LOCAL_SAPIC 0
#define ACPI_ENTRY_IO_SAPIC 1
#define ACPI_ENTRY_INT_SRC_OVERRIDE 2
#define ACPI_ENTRY_PLATFORM_INT_SOURCE 3 /* Unimplemented */
typedef struct acpi_entry_lsapic {
u8 type;
u8 length;
u16 acpi_processor_id;
u16 flags;
u8 id;
u8 eid;
} acpi_entry_lsapic_t;
typedef struct {
u8 type;
u8 length;
u16 flags;
u8 int_type;
u8 id;
u8 eid;
u8 iosapic_vector;
u8 reserved[4];
u32 global_vector;
} acpi_entry_platform_src_t;
/* ACPI 2.0 with 1.3 errata specific structures */
#define ACPI_MADT_SIG "APIC"
#define ACPI_MADT_SIG_LEN 4
typedef struct {
acpi_desc_table_hdr_t header;
u32 lapic_address;
u32 flags;
} acpi_madt_t;
/* acpi 2.0 MADT flags */
#define MADT_PCAT_COMPAT (1<<0)
/* acpi 2.0 MADT structure types */
#define ACPI20_ENTRY_LOCAL_APIC 0
#define ACPI20_ENTRY_IO_APIC 1
#define ACPI20_ENTRY_INT_SRC_OVERRIDE 2
#define ACPI20_ENTRY_NMI_SOURCE 3
#define ACPI20_ENTRY_LOCAL_APIC_NMI 4
#define ACPI20_ENTRY_LOCAL_APIC_ADDR_OVERRIDE 5
#define ACPI20_ENTRY_IO_SAPIC 6
#define ACPI20_ENTRY_LOCAL_SAPIC 7
#define ACPI20_ENTRY_PLATFORM_INT_SOURCE 8
typedef struct acpi20_entry_lsapic {
u8 type;
u8 length;
u8 acpi_processor_id;
u8 id;
u8 eid;
u8 reserved[3];
u32 flags;
} acpi20_entry_lsapic_t;
typedef struct acpi20_entry_lapic_addr_override {
u8 type;
u8 length;
u8 reserved[2];
unsigned long lapic_address;
} acpi20_entry_lapic_addr_override_t;
typedef struct {
u8 type;
u8 length;
u16 flags;
u8 int_type;
u8 id;
u8 eid;
u8 iosapic_vector;
u32 global_vector;
} acpi20_entry_platform_src_t;
/* constants for interrupt routing API for device drivers */
#define ACPI20_ENTRY_PIS_PMI 1
#define ACPI20_ENTRY_PIS_INIT 2
#define ACPI20_ENTRY_PIS_CPEI 3
#define ACPI_MAX_PLATFORM_IRQS 4
#define ACPI_SPCRT_SIG "SPCR"
#define ACPI_SPCRT_SIG_LEN 4
#define ACPI_DBGPT_SIG "DBGP"
#define ACPI_DBGPT_SIG_LEN 4
extern int acpi20_parse(acpi20_rsdp_t *);
extern int acpi20_early_parse(acpi20_rsdp_t *);
extern int acpi_parse(acpi_rsdp_t *);
extern const char *acpi_get_sysname (void);
extern int acpi_request_vector(u32 int_type);
extern void (*acpi_idle) (void); /* power-management idle function, if any */
#ifdef CONFIG_NUMA
extern cnodeid_t paddr_to_nid(unsigned long paddr);
#endif
/*
* ACPI 2.0 SRAT Table
* http://www.microsoft.com/HWDEV/design/SRAT.htm
*/
typedef struct acpi_srat {
acpi_desc_table_hdr_t header;
u32 table_revision;
u64 reserved;
} acpi_srat_t;
typedef struct srat_cpu_affinity {
u8 type;
u8 length;
u8 proximity_domain;
u8 apic_id;
u32 flags;
u8 local_sapic_eid;
u8 reserved[7];
} srat_cpu_affinity_t;
typedef struct srat_memory_affinity {
u8 type;
u8 length;
u8 proximity_domain;
u8 reserved[5];
u32 base_addr_lo;
u32 base_addr_hi;
u32 length_lo;
u32 length_hi;
u32 memory_type;
u32 flags;
u64 reserved2;
} srat_memory_affinity_t;
/* ACPI 2.0 SRAT structure */
#define ACPI_SRAT_SIG "SRAT"
#define ACPI_SRAT_SIG_LEN 4
#define ACPI_SRAT_REVISION 1
#define SRAT_CPU_STRUCTURE 0
#define SRAT_MEMORY_STRUCTURE 1
/* Only 1 flag for cpu affinity structure! */
#define SRAT_CPU_FLAGS_ENABLED 0x00000001
#define SRAT_MEMORY_FLAGS_ENABLED 0x00000001
#define SRAT_MEMORY_FLAGS_HOTREMOVABLE 0x00000002
/* ACPI 2.0 address range types */
#define ACPI_ADDRESS_RANGE_MEMORY 1
#define ACPI_ADDRESS_RANGE_RESERVED 2
#define ACPI_ADDRESS_RANGE_ACPI 3
#define ACPI_ADDRESS_RANGE_NVS 4
#define NODE_ARRAY_INDEX(x) ((x) / 8) /* 8 bits/char */
#define NODE_ARRAY_OFFSET(x) ((x) % 8) /* 8 bits/char */
#define MAX_PXM_DOMAINS (256)
#ifdef CONFIG_DISCONTIGMEM
/*
* List of node memory chunks. Filled when parsing SRAT table to
* obtain information about memory nodes.
*/
struct node_memory_chunk_s {
unsigned long start_paddr;
unsigned long size;
int pxm; // proximity domain of node
int nid; // which cnode contains this chunk?
int bank; // which mem bank on this node
};
extern struct node_memory_chunk_s node_memory_chunk[PLAT_MAXCLUMPS]; // temporary?
struct node_cpuid_s {
u16 phys_id; /* id << 8 | eid */
int pxm; // proximity domain of cpu
int nid;
};
extern struct node_cpuid_s node_cpuid[NR_CPUS];
extern int pxm_to_nid_map[MAX_PXM_DOMAINS]; /* _PXM to logical node ID map */
extern int nid_to_pxm_map[PLAT_MAX_COMPACT_NODES]; /* logical node ID to _PXM map */
extern int numnodes; /* total number of nodes in system */
extern int num_memory_chunks; /* total number of memory chunks */
/*
* ACPI 2.0 SLIT Table
* http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
*/
typedef struct acpi_slit {
acpi_desc_table_hdr_t header;
u64 localities;
u8 entries[1]; /* dummy, real size = locality^2 */
} acpi_slit_t;
extern u8 acpi20_slit[PLAT_MAX_COMPACT_NODES * PLAT_MAX_COMPACT_NODES];
#define ACPI_SLIT_SIG "SLIT"
#define ACPI_SLIT_SIG_LEN 4
#define ACPI_SLIT_REVISION 1
#define ACPI_SLIT_LOCAL 10
#endif /* CONFIG_DISCONTIGMEM */
#pragma pack()
#endif /* _ASM_IA64_ACPI_EXT_H */
/*
* asm-ia64/acpi.h
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
#ifdef __KERNEL__
#define __acpi_map_table(phys_addr, size) __va(phys_addr)
int acpi_boot_init (char *cdline);
int acpi_find_rsdp (unsigned long *phys_addr);
int acpi_request_vector (u32 int_type);
int acpi_get_prt (struct pci_vector_struct **vectors, int *count);
int acpi_get_interrupt_model(int *type);
#ifdef CONFIG_DISCONTIGMEM
#define NODE_ARRAY_INDEX(x) ((x) / 8) /* 8 bits/char */
#define NODE_ARRAY_OFFSET(x) ((x) % 8) /* 8 bits/char */
#define MAX_PXM_DOMAINS (256)
#endif /* CONFIG_DISCONTIGMEM */
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
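As a side note, the NODE_ARRAY_INDEX/OFFSET macros above address individual node bits within a byte array; a small illustrative model (not kernel code):

#include <stdint.h>

/* Hypothetical helpers showing the bit addressing implied by
 * NODE_ARRAY_INDEX (node / 8) and NODE_ARRAY_OFFSET (node % 8). */
static inline int node_bit_test(const uint8_t *map, int node)
{
	return (map[node / 8] >> (node % 8)) & 1;
}

static inline void node_bit_set(uint8_t *map, int node)
{
	map[node / 8] |= 1u << (node % 8);
}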
#ifndef _ASM_IA64_ACPIKCFG_H
#define _ASM_IA64_ACPIKCFG_H
/*
* acpikcfg.h - ACPI based Kernel Configuration Manager External Interfaces
*
* Copyright (C) 2000 Intel Corp.
* Copyright (C) 2000 J.I. Lee <jung-ik.lee@intel.com>
*/
u32 __init acpi_cf_init (void * rsdp);
u32 __init acpi_cf_terminate (void );
u32 __init
acpi_cf_get_pci_vectors (
struct pci_vector_struct **vectors,
int *num_pci_vectors
);
#ifdef CONFIG_ACPI_KERNEL_CONFIG_DEBUG
void __init
acpi_cf_print_pci_vectors (
struct pci_vector_struct *vectors,
int num_pci_vectors
);
#endif
#endif /* _ASM_IA64_ACPIKCFG_H */
#ifndef _ASM_IA64_CACHEFLUSH_H
#define _ASM_IA64_CACHEFLUSH_H
/*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/bitops.h>
#include <asm/page.h>
/*
* Cache flushing routines. This is the kind of stuff that can be very expensive, so try
* to avoid them whenever possible.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_page(vma,page) do { } while (0)
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &page->flags); \
} while (0)
extern void flush_icache_range (unsigned long start, unsigned long end);
#define flush_icache_user_range(vma, page, user_addr, len) \
do { \
unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
flush_icache_range(_addr, _addr + (len)); \
} while (0)
#endif /* _ASM_IA64_CACHEFLUSH_H */
......@@ -87,6 +87,8 @@ typedef struct {
#define EFI_MEMORY_RUNTIME 0x8000000000000000 /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
#define EFI_PAGE_SHIFT 12
typedef struct {
u32 type;
u32 pad;
......@@ -257,6 +259,7 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timeval *tv);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (u64 phys_addr);
/*
* Variable Attributes
......
......@@ -88,6 +88,7 @@ hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
extern struct irq_desc _irq_desc[NR_IRQS];
#ifndef CONFIG_IA64_GENERIC
static inline struct irq_desc *
__ia64_irq_desc (unsigned int irq)
{
......@@ -105,6 +106,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
{
return (unsigned int) vec;
}
#endif
/*
* Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt
......
......@@ -31,14 +31,14 @@ static __inline__ int
ide_default_irq (ide_ioreg_t base)
{
switch (base) {
case 0x1f0: return isa_irq_to_vector(14);
case 0x170: return isa_irq_to_vector(15);
case 0x1e8: return isa_irq_to_vector(11);
case 0x168: return isa_irq_to_vector(10);
case 0x1e0: return isa_irq_to_vector(8);
case 0x160: return isa_irq_to_vector(12);
default:
return 0;
case 0x1f0: return isa_irq_to_vector(14);
case 0x170: return isa_irq_to_vector(15);
case 0x1e8: return isa_irq_to_vector(11);
case 0x168: return isa_irq_to_vector(10);
case 0x1e0: return isa_irq_to_vector(8);
case 0x160: return isa_irq_to_vector(12);
default:
return 0;
}
}
......@@ -46,14 +46,14 @@ static __inline__ ide_ioreg_t
ide_default_io_base (int index)
{
switch (index) {
case 0: return 0x1f0;
case 1: return 0x170;
case 2: return 0x1e8;
case 3: return 0x168;
case 4: return 0x1e0;
case 5: return 0x160;
default:
return 0;
case 0: return 0x1f0;
case 1: return 0x170;
case 2: return 0x1e8;
case 3: return 0x168;
case 4: return 0x1e0;
case 5: return 0x160;
default:
return 0;
}
}
......
......@@ -68,6 +68,8 @@ extern void machvec_noop (void);
# include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
# include <asm/machvec_dig.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_SGI_SN1)
# include <asm/machvec_sn1.h>
# elif defined (CONFIG_IA64_SGI_SN2)
......@@ -123,6 +125,7 @@ struct ia64_machine_vector {
ia64_mv_cmci_handler_t *cmci_handler;
ia64_mv_log_print_t *log_print;
ia64_mv_send_ipi_t *send_ipi;
ia64_mv_global_tlb_purge_t *global_tlb_purge;
ia64_mv_pci_dma_init *dma_init;
ia64_mv_pci_alloc_consistent *alloc_consistent;
ia64_mv_pci_free_consistent *free_consistent;
......@@ -149,6 +152,7 @@ struct ia64_machine_vector {
{ \
#name, \
platform_setup, \
platform_cpu_init, \
platform_irq_init, \
platform_pci_fixup, \
platform_map_nr, \
......
#ifndef _ASM_IA64_MACHVEC_HPZX1_h
#define _ASM_IA64_MACHVEC_HPZX1_h
extern ia64_mv_setup_t dig_setup;
extern ia64_mv_pci_fixup_t hpzx1_pci_fixup;
extern ia64_mv_map_nr_t map_nr_dense;
extern ia64_mv_pci_alloc_consistent sba_alloc_consistent;
extern ia64_mv_pci_free_consistent sba_free_consistent;
extern ia64_mv_pci_map_single sba_map_single;
extern ia64_mv_pci_unmap_single sba_unmap_single;
extern ia64_mv_pci_map_sg sba_map_sg;
extern ia64_mv_pci_unmap_sg sba_unmap_sg;
extern ia64_mv_pci_dma_address sba_dma_address;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define platform_name "hpzx1"
#define platform_setup dig_setup
#define platform_pci_fixup hpzx1_pci_fixup
#define platform_map_nr map_nr_dense
#define platform_pci_dma_init ((ia64_mv_pci_dma_init *) machvec_noop)
#define platform_pci_alloc_consistent sba_alloc_consistent
#define platform_pci_free_consistent sba_free_consistent
#define platform_pci_map_single sba_map_single
#define platform_pci_unmap_single sba_unmap_single
#define platform_pci_map_sg sba_map_sg
#define platform_pci_unmap_sg sba_unmap_sg
#define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop)
#define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop)
#define platform_pci_dma_address sba_dma_address
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
......@@ -5,6 +5,11 @@
#include <asm/machvec.h>
extern ia64_mv_send_ipi_t ia64_send_ipi;
extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
extern ia64_mv_irq_desc __ia64_irq_desc;
extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
extern ia64_mv_inb_t __ia64_inb;
extern ia64_mv_inw_t __ia64_inw;
extern ia64_mv_inl_t __ia64_inl;
......
......@@ -51,6 +51,9 @@ ia64_module_init (struct module *mod)
return 0;
archdata = (struct archdata *)(mod->archdata_start);
if (archdata->unw_start == 0)
return 0;
/*
* Make sure the unwind pointers are sane.
*/
......
......@@ -6,8 +6,8 @@
* This file was generated by arch/ia64/tools/print_offsets.awk.
*
*/
#define IA64_TASK_SIZE 3936 /* 0xf60 */
#define IA64_THREAD_INFO_SIZE 24 /* 0x18 */
#define IA64_TASK_SIZE 3952 /* 0xf70 */
#define IA64_THREAD_INFO_SIZE 32 /* 0x20 */
#define IA64_PT_REGS_SIZE 400 /* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560 /* 0x230 */
#define IA64_SIGINFO_SIZE 128 /* 0x80 */
......@@ -15,7 +15,7 @@
#define SIGFRAME_SIZE 2816 /* 0xb00 */
#define UNW_FRAME_INFO_SIZE 448 /* 0x1c0 */
#define IA64_TASK_THREAD_KSP_OFFSET 1480 /* 0x5c8 */
#define IA64_TASK_THREAD_KSP_OFFSET 1496 /* 0x5d8 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */
#define IA64_PT_REGS_CR_IIP_OFFSET 8 /* 0x8 */
#define IA64_PT_REGS_CR_IFS_OFFSET 16 /* 0x10 */
......
......@@ -20,6 +20,11 @@
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int seg, int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
struct pci_dev;
/*
......
#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H
/*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifdef __ASSEMBLY__
#define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */
#include <linux/threads.h>
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu(var, cpu) (*(__typeof__(&(var))) ((void *) &(var) + __per_cpu_offset[cpu]))
#define this_cpu(var) (var)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PERCPU_H */
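A user-space model (not kernel code) of the per_cpu() arithmetic above: each CPU's copy of a variable sits at a fixed offset from the primary copy, so a lookup is simply the base address plus that CPU's offset:

#include <stdio.h>

#define NCPUS 4

static long counters[NCPUS];			/* stand-in for the per-CPU copies   */
static unsigned long cpu_offset[NCPUS] = {	/* stand-in for __per_cpu_offset[]   */
	0 * sizeof(long), 1 * sizeof(long), 2 * sizeof(long), 3 * sizeof(long)
};

/* Same shape as the kernel macro: take the address of the primary copy and
 * add the per-CPU offset. */
#define my_per_cpu(var, cpu) \
	(*(long *) ((char *) &(var) + cpu_offset[cpu]))

int main(void)
{
	my_per_cpu(counters[0], 2) = 42;	/* writes counters[2] */
	printf("%ld\n", counters[2]);
	return 0;
}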
......@@ -150,7 +150,7 @@ pte_free_kernel (pte_t *pte)
free_page((unsigned long) pte);
}
extern int do_check_pgt_cache (int, int);
extern void check_pgt_cache (void);
/*
* IA-64 doesn't have any external MMU info: the page tables contain all the necessary
......@@ -177,69 +177,4 @@ update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
/*
* Now for some TLB flushing routines. This is the kind of stuff that
* can be very expensive, so try to avoid them whenever possible.
*/
/*
* Flush everything (kernel mapping may also have changed due to
* vmalloc/vfree).
*/
extern void __flush_tlb_all (void);
#ifdef CONFIG_SMP
extern void smp_flush_tlb_all (void);
# define flush_tlb_all() smp_flush_tlb_all()
#else
# define flush_tlb_all() __flush_tlb_all()
#endif
/*
* Flush a specified user mapping
*/
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
if (mm) {
mm->context = 0;
if (mm == current->active_mm) {
/* This is called, e.g., as a result of exec(). */
get_new_mmu_context(mm);
reload_context(mm);
}
}
}
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
/*
* Page-granular tlb flush.
*/
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
if (vma->vm_mm == current->active_mm)
asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
#endif
}
/*
* Flush the TLB entries mapping the virtually mapped linear page
* table corresponding to address range [START-END).
*/
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
struct vm_area_struct vma;
if (rgn_index(start) != rgn_index(end))
printk("flush_tlb_pgtables: can't flush across regions!!\n");
vma.vm_mm = mm;
flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
}
#endif /* _ASM_IA64_PGALLOC_H */
......@@ -121,6 +121,7 @@
# ifndef __ASSEMBLY__
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
......@@ -290,30 +291,6 @@ ia64_phys_addr_valid (unsigned long addr)
# define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
#endif
/*
* Return the region index for virtual address ADDRESS.
*/
static inline unsigned long
rgn_index (unsigned long address)
{
ia64_va a;
a.l = address;
return a.f.reg;
}
/*
* Return the region offset for virtual address ADDRESS.
*/
static inline unsigned long
rgn_offset (unsigned long address)
{
ia64_va a;
a.l = address;
return a.f.off;
}
static inline unsigned long
pgd_index (unsigned long address)
{
......@@ -440,33 +417,6 @@ extern void paging_init (void);
#define io_remap_page_range remap_page_range /* XXX is this right? */
/*
* Now for some cache flushing routines. This is the kind of stuff that can be very
* expensive, so try to avoid them whenever possible.
*/
/* Caches aren't brain-dead on the IA-64. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_page(vma,page) do { } while (0)
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &page->flags); \
} while (0)
extern void flush_icache_range (unsigned long start, unsigned long end);
#define flush_icache_user_range(vma, page, user_addr, len) \
do { \
unsigned long _addr = page_address(page) + ((user_addr) & ~PAGE_MASK); \
flush_icache_range(_addr, _addr + (len)); \
} while (0)
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
......
......@@ -15,7 +15,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/percpu.h>
#include <asm/ptrace.h>
#include <asm/kregs.h>
......@@ -186,10 +186,6 @@
*/
#define IA64_USEC_PER_CYC_SHIFT 41
#define __HAVE_ARCH_PER_CPU
#define THIS_CPU(var) (var)
#ifndef __ASSEMBLY__
#include <linux/threads.h>
......@@ -202,11 +198,6 @@
#include <asm/unwind.h>
#include <asm/atomic.h>
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu(var, cpu) (*(__typeof__(&(var))) ((void *) &(var) + __per_cpu_offset[cpu]))
#define this_cpu(var) (var)
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
__u64 reserved0 : 1;
......
......@@ -5,8 +5,8 @@
* Here is where we want to put optimized versions of the string
* routines.
*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998-2000, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h> /* remove this once we remove the A-step workaround... */
......@@ -17,7 +17,21 @@
#define __HAVE_ARCH_BCOPY 1 /* see arch/ia64/lib/memcpy.S */
extern __kernel_size_t strlen (const char *);
extern void *memset (void *, int, __kernel_size_t);
extern void *memcpy (void *, const void *, __kernel_size_t);
extern void *__memset_generic (void *, int, __kernel_size_t);
extern void __bzero (void *, __kernel_size_t);
#define memset(s, c, count) \
({ \
void *_s = (s); \
int _c = (c); \
__kernel_size_t _count = (count); \
\
if (__builtin_constant_p(_c) && _c == 0) \
__bzero(_s, _count); \
else \
__memset_generic(_s, _c, _count); \
})
#endif /* _ASM_IA64_STRING_H */
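A user-space model of the dispatch done by the new memset() macro; only the __builtin_constant_p() test and the constant-zero special case mirror the kernel macro, and the helper names are hypothetical:

#include <stdio.h>
#include <string.h>

static void zero_fill(void *s, size_t n)           { memset(s, 0, n); }
static void generic_fill(void *s, int c, size_t n) { memset(s, c, n); }

/* GCC statement expression, as in the kernel macro: a compile-time-constant
 * zero fill takes the bzero-style path, anything else the generic path. */
#define my_memset(s, c, count)					\
({								\
	void *_s = (s);						\
	int _c = (c);						\
	size_t _count = (count);				\
								\
	if (__builtin_constant_p(_c) && _c == 0)		\
		zero_fill(_s, _count);				\
	else							\
		generic_fill(_s, _c, _count);			\
})

int main(void)
{
	char buf[8];

	my_memset(buf, 0, sizeof(buf));		/* constant zero -> zero_fill    */
	my_memset(buf, 'x', sizeof(buf));	/* anything else -> generic_fill */
	printf("%c\n", buf[0]);
	return 0;
}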
......@@ -18,14 +18,6 @@
#define KERNEL_START (PAGE_OFFSET + 68*1024*1024)
/*
* The following #defines must match with vmlinux.lds.S:
*/
#define IVT_ADDR (KERNEL_START)
#define IVT_END_ADDR (KERNEL_START + 0x8000)
#define ZERO_PAGE_ADDR PAGE_ALIGN(IVT_END_ADDR)
#define SWAPPER_PGD_ADDR (ZERO_PAGE_ADDR + 1*PAGE_SIZE)
#define GATE_ADDR (0xa000000000000000 + PAGE_SIZE)
#define PERCPU_ADDR (0xa000000000000000 + 2*PAGE_SIZE)
......
......@@ -12,7 +12,10 @@
#define TI_EXEC_DOMAIN 0x00
#define TI_FLAGS 0x08
#define TI_CPU 0x0c
#define TI_ADDR_LIMI 0x10
#define TI_ADDR_LIMIT 0x10
#define TI_PRE_COUNT 0x18
#define PREEMPT_ACTIVE 0x4000000
#ifndef __ASSEMBLY__
......@@ -26,6 +29,7 @@ struct thread_info {
__u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
mm_segment_t addr_limit; /* user-level address space limit */
__s32 preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */
};
#define INIT_THREAD_SIZE /* tell sched.h not to declare the thread_union */
......@@ -37,6 +41,7 @@ struct thread_info {
flags: 0, \
cpu: 0, \
addr_limit: KERNEL_DS, \
preempt_count: 0, \
}
/* how to get the thread information struct from C */
......
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H
/*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
/*
* Now for some TLB flushing routines. This is the kind of stuff that
* can be very expensive, so try to avoid them whenever possible.
*/
/*
* Flush everything (kernel mapping may also have changed due to
* vmalloc/vfree).
*/
extern void __flush_tlb_all (void);
#ifdef CONFIG_SMP
extern void smp_flush_tlb_all (void);
# define flush_tlb_all() smp_flush_tlb_all()
#else
# define flush_tlb_all() __flush_tlb_all()
#endif
/*
* Flush a specified user mapping
*/
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
if (mm) {
mm->context = 0;
if (mm == current->active_mm) {
/* This is called, e.g., as a result of exec(). */
get_new_mmu_context(mm);
reload_context(mm);
}
}
}
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
/*
* Page-granular tlb flush.
*/
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
if (vma->vm_mm == current->active_mm)
asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
#endif
}
/*
* Flush the TLB entries mapping the virtually mapped linear page
* table corresponding to address range [START-END).
*/
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
struct vm_area_struct vma;
if (REGION_NUMBER(start) != REGION_NUMBER(end))
printk("flush_tlb_pgtables: can't flush across regions!!\n");
vma.vm_mm = mm;
flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
}
#define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */
#endif /* _ASM_IA64_TLBFLUSH_H */
......@@ -56,8 +56,9 @@
* address TASK_SIZE is never valid. We also need to make sure that the address doesn't
* point inside the virtually mapped linear page table.
*/
#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg \
&& ((segment).seg == KERNEL_DS.seg || rgn_offset((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg \
&& ((segment).seg == KERNEL_DS.seg \
|| REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define access_ok(type,addr,size) __access_ok((addr),(size),get_fs())
static inline int
......
......@@ -219,6 +219,9 @@
#define __NR_lremovexattr 1227
#define __NR_fremovexattr 1228
#define __NR_tkill 1229
#define __NR_futex 1230
#define __NR_sched_setaffinity 1231
#define __NR_sched_getaffinity 1232
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
......
......@@ -11,6 +11,8 @@
extern void setup_serial_acpi(void *);
#define ACPI_SIG_LEN 4
/* ACPI table signatures */
#define ACPI_SPCRT_SIGNATURE "SPCR"
#define ACPI_DBGPT_SIGNATURE "DBGP"
......
......@@ -510,6 +510,9 @@
#define PCI_DEVICE_ID_HP_DIVA1 0x1049
#define PCI_DEVICE_ID_HP_DIVA2 0x104A
#define PCI_DEVICE_ID_HP_SP2_0 0x104B
#define PCI_DEVICE_ID_HP_ZX1_SBA 0x1229
#define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a
#define PCI_DEVICE_ID_HP_ZX1_LBA 0x122e
#define PCI_VENDOR_ID_PCTECH 0x1042
#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
......
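Hedged sketch of how a driver might look up the new zx1 IOC using the IDs added above, assuming the 2.5-era pci_find_device() interface:

#include <linux/pci.h>

/* Illustrative only: return the first zx1 I/O Controller found, or NULL. */
static struct pci_dev *find_zx1_ioc(void)
{
	return pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
}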