Commit 8beb1642 authored by David Mosberger

ia64: Manual merge.

parents 12ebbff8 cb1a895f
@@ -26,7 +26,7 @@ CFLAGS_KERNEL := -mconstant-gp
 GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
 ifneq ($(GCC_VERSION),2)
-	CFLAGS += -frename-registers --param max-inline-insns=2000
+	CFLAGS += -frename-registers --param max-inline-insns=5000
 endif
 ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
...
@@ -64,7 +64,8 @@ if [ "$CONFIG_MCKINLEY" = "y" ]; then
   fi
 fi
-if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ]; then
+if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ];
+then
 	bool '  Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
 	define_bool CONFIG_PM y
 fi
@@ -99,21 +100,21 @@ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
 tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
 
 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
 
   source drivers/acpi/Config.in
 
   bool 'PCI support' CONFIG_PCI
   source drivers/pci/Config.in
 
   bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
   if [ "$CONFIG_HOTPLUG" = "y" ]; then
+    source drivers/hotplug/Config.in
     source drivers/pcmcia/Config.in
   else
     define_bool CONFIG_PCMCIA n
   fi
 
   source drivers/parport/Config.in
 
 fi # !HP_SIM
 
 endmenu
@@ -124,38 +125,17 @@ fi
 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
-source drivers/mtd/Config.in
-source drivers/pnp/Config.in
-source drivers/block/Config.in
-source drivers/ieee1394/Config.in
-source drivers/message/i2o/Config.in
-source drivers/md/Config.in
-source drivers/message/fusion/Config.in
-
-mainmenu_option next_comment
-comment 'ATA/ATAPI/MFM/RLL support'
-
-tristate 'ATA/ATAPI/MFM/RLL support' CONFIG_IDE
-
-if [ "$CONFIG_IDE" != "n" ]; then
-  source drivers/ide/Config.in
-else
-  define_bool CONFIG_BLK_DEV_HD n
-fi
-endmenu
-
-else # ! HP_SIM
 
 mainmenu_option next_comment
 comment 'Block devices'
 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
 dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
 
 tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
 if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
   int '  Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
 fi
 endmenu
 
 fi # !HP_SIM
 
 mainmenu_option next_comment
 comment 'SCSI support'
@@ -168,8 +148,7 @@ fi
 endmenu
 
 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
 if [ "$CONFIG_NET" = "y" ]; then
 mainmenu_option next_comment
 comment 'Network device support'
@@ -178,21 +157,17 @@ if [ "$CONFIG_NET" = "y" ]; then
   source drivers/net/Config.in
 fi
 endmenu
 fi
-source net/ax25/Config.in
-source drivers/isdn/Config.in
-
-mainmenu_option next_comment
-comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)'
-bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
-if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
-  source drivers/cdrom/Config.in
-fi
-endmenu
-fi
+
+bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
 
 fi # !HP_SIM
 
 #
@@ -220,21 +195,18 @@ fi
 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
 
 mainmenu_option next_comment
 comment 'Sound'
 tristate 'Sound card support' CONFIG_SOUND
 if [ "$CONFIG_SOUND" != "n" ]; then
   source sound/Config.in
 fi
 endmenu
 
 source drivers/usb/Config.in
 source lib/Config.in
 source net/bluetooth/Config.in
 
 fi # !HP_SIM
 
 if [ "$CONFIG_IA64_HP_SIM" != "n" -o "$CONFIG_IA64_GENERIC" != "n" ]; then
...
@@ -30,12 +30,12 @@ static void simcons_write (struct console *, const char *, unsigned);
 static kdev_t simcons_console_device (struct console *);
 
 struct console hpsim_cons = {
-	name:		"simcons",
-	write:		simcons_write,
-	device:		simcons_console_device,
-	setup:		simcons_init,
-	flags:		CON_PRINTBUFFER,
-	index:		-1,
+	.name =		"simcons",
+	.write =	simcons_write,
+	.device =	simcons_console_device,
+	.setup =	simcons_init,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
 };
 
 static int
...
@@ -22,14 +22,14 @@ hpsim_irq_noop (unsigned int irq)
 }
 
 static struct hw_interrupt_type irq_type_hp_sim = {
-	typename:	"hpsim",
-	startup:	hpsim_irq_startup,
-	shutdown:	hpsim_irq_noop,
-	enable:		hpsim_irq_noop,
-	disable:	hpsim_irq_noop,
-	ack:		hpsim_irq_noop,
-	end:		hpsim_irq_noop,
-	set_affinity:	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
+	.typename =	"hpsim",
+	.startup =	hpsim_irq_startup,
+	.shutdown =	hpsim_irq_noop,
+	.enable =	hpsim_irq_noop,
+	.disable =	hpsim_irq_noop,
+	.ack =		hpsim_irq_noop,
+	.end =		hpsim_irq_noop,
+	.set_affinity =	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
 };
 
 void __init
...
@@ -31,6 +31,7 @@
 #include <linux/serialP.h>
 
 #include <asm/irq.h>
+#include <asm/hw_irq.h>
 #include <asm/uaccess.h>
 
 #ifdef CONFIG_KDB
...
#define MACHVEC_PLATFORM_NAME	hpzx1
#include <asm/machvec_init.h>
...
@@ -67,7 +67,7 @@ ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int
 }
 
 static struct vm_operations_struct ia32_shared_page_vm_ops = {
-	nopage: ia32_install_shared_page
+	.nopage = ia32_install_shared_page
 };
 
 void
...
@@ -56,6 +56,8 @@ asm (".weak iosapic_version");
 void (*pm_idle) (void);
 void (*pm_power_off) (void);
 
+unsigned char acpi_kbd_controller_present = 1;
+
 const char *
 acpi_get_sysname (void)
 {
@@ -206,7 +208,7 @@ struct acpi_table_madt * acpi_madt __initdata;
 static int __init
 acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
 {
-	struct acpi_table_lapic_addr_ovr *lapic = NULL;
+	struct acpi_table_lapic_addr_ovr *lapic;
 
 	lapic = (struct acpi_table_lapic_addr_ovr *) header;
 	if (!lapic)
@@ -226,7 +228,7 @@ acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
 static int __init
 acpi_parse_lsapic (acpi_table_entry_header *header)
 {
-	struct acpi_table_lsapic *lsapic = NULL;
+	struct acpi_table_lsapic *lsapic;
 
 	lsapic = (struct acpi_table_lsapic *) header;
 	if (!lsapic)
@@ -262,7 +264,7 @@ acpi_parse_lsapic (acpi_table_entry_header *header)
 static int __init
 acpi_parse_lapic_nmi (acpi_table_entry_header *header)
 {
-	struct acpi_table_lapic_nmi *lacpi_nmi = NULL;
+	struct acpi_table_lapic_nmi *lacpi_nmi;
 
 	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
 	if (!lacpi_nmi)
@@ -279,7 +281,7 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header)
 static int __init
 acpi_find_iosapic (int global_vector, u32 *irq_base, char **iosapic_address)
 {
-	struct acpi_table_iosapic *iosapic = NULL;
+	struct acpi_table_iosapic *iosapic;
 	int ver = 0;
 	int max_pin = 0;
 	char *p = 0;
@@ -338,7 +340,7 @@ acpi_parse_iosapic (acpi_table_entry_header *header)
 static int __init
 acpi_parse_plat_int_src (acpi_table_entry_header *header)
 {
-	struct acpi_table_plat_int_src *plintsrc = NULL;
+	struct acpi_table_plat_int_src *plintsrc;
 	int vector = 0;
 	u32 irq_base = 0;
 	char *iosapic_address = NULL;
@@ -381,7 +383,7 @@ acpi_parse_plat_int_src (acpi_table_entry_header *header)
 static int __init
 acpi_parse_int_src_ovr (acpi_table_entry_header *header)
 {
-	struct acpi_table_int_src_ovr *p = NULL;
+	struct acpi_table_int_src_ovr *p;
 
 	p = (struct acpi_table_int_src_ovr *) header;
 	if (!p)
@@ -404,7 +406,7 @@ acpi_parse_int_src_ovr (acpi_table_entry_header *header)
 static int __init
 acpi_parse_nmi_src (acpi_table_entry_header *header)
 {
-	struct acpi_table_nmi_src *nmi_src = NULL;
+	struct acpi_table_nmi_src *nmi_src;
 
 	nmi_src = (struct acpi_table_nmi_src*) header;
 	if (!nmi_src)
@@ -425,10 +427,6 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 		return -EINVAL;
 
 	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
-	if (!acpi_madt) {
-		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-		return -ENODEV;
-	}
 
 	/* Get base address of IPI Message Block */
@@ -442,6 +440,28 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 }
 
+static int __init
+acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
+{
+	struct acpi_table_header *fadt_header;
+	fadt_descriptor_rev2 *fadt;
+
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	fadt_header = (struct acpi_table_header *) __va(phys_addr);
+	if (fadt_header->revision != 3)
+		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
+
+	fadt = (fadt_descriptor_rev2 *) fadt_header;
+	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
+		acpi_kbd_controller_present = 0;
+
+	return 0;
+}
+
 int __init
 acpi_find_rsdp (unsigned long *rsdp_phys)
 {
@@ -467,8 +487,8 @@ acpi_find_rsdp (unsigned long *rsdp_phys)
 static int __init
 acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 {
-	acpi_ser_t *spcr = NULL;
-	unsigned long global_int = 0;
+	acpi_ser_t *spcr;
+	unsigned long global_int;
 
 	if (!phys_addr || !size)
 		return -EINVAL;
@@ -486,11 +506,6 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 	 */
 
 	spcr = (acpi_ser_t *) __va(phys_addr);
-	if (!spcr) {
-		printk(KERN_WARNING PREFIX "Unable to map SPCR\n");
-		return -ENODEV;
-	}
 
 	setup_serial_acpi(spcr);
 
 	if (spcr->length < sizeof(acpi_ser_t))
@@ -527,11 +542,11 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 int __init
 acpi_boot_init (char *cmdline)
 {
-	int result = 0;
+	int result;
 
 	/* Initialize the ACPI boot-time table parser */
 	result = acpi_table_init(cmdline);
-	if (0 != result)
+	if (result)
 		return result;
 
 	/*
@@ -542,57 +557,49 @@ acpi_boot_init (char *cmdline)
 	 * information -- the successor to MPS tables.
 	 */
 
-	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
-	if (1 > result)
-		return result;
+	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
+		printk(KERN_ERR PREFIX "Can't find MADT\n");
+		goto skip_madt;
+	}
 
 	/* Local APIC */
 
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
+				  acpi_parse_lapic_addr_ovr) < 0)
+		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic);
-	if (1 > result) {
-		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries!\n");
-		return -ENODEV;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC,
+				  acpi_parse_lsapic) < 1)
+		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI,
+				  acpi_parse_lapic_nmi) < 0)
+		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 
 	/* I/O APIC */
 
-	result = acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic);
-	if (1 > result) {
-		printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries!\n");
-		return ((result == 0) ? -ENODEV : result);
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC,
				  acpi_parse_iosapic) < 1)
+		printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries\n");
 
 	/* System-Level Interrupt Routing */
 
-	result = acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
+				  acpi_parse_plat_int_src) < 0)
+		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR,
				  acpi_parse_int_src_ovr) < 0)
+		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC,
				  acpi_parse_nmi_src) < 0)
+		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+  skip_madt:
+
+	/* FADT says whether a legacy keyboard controller is present. */
+	if (acpi_table_parse(ACPI_FACP, acpi_parse_fadt) < 1)
+		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
 #ifdef CONFIG_SERIAL_ACPI
 	/*
@@ -602,7 +609,7 @@ acpi_boot_init (char *cmdline)
 	 * serial ports, EC, SMBus, etc.
 	 */
 	acpi_table_parse(ACPI_SPCR, acpi_parse_spcr);
-#endif /*CONFIG_SERIAL_ACPI*/
+#endif
 
 #ifdef CONFIG_SMP
 	if (available_cpus == 0) {
@@ -625,9 +632,9 @@ acpi_boot_init (char *cmdline)
 int __init
 acpi_get_prt (struct pci_vector_struct **vectors, int *count)
 {
-	struct pci_vector_struct *vector = NULL;
-	struct list_head *node = NULL;
-	struct acpi_prt_entry *entry = NULL;
+	struct pci_vector_struct *vector;
+	struct list_head *node;
+	struct acpi_prt_entry *entry;
 	int i = 0;
 
 	if (!vectors || !count)
...
@@ -125,9 +125,79 @@ efi_gettimeofday (struct timeval *tv)
 	tv->tv_usec = tm.nanosecond / 1000;
 }
 
+static int
+is_available_memory (efi_memory_desc_t *md)
+{
+	if (!(md->attribute & EFI_MEMORY_WB))
+		return 0;
+
+	switch (md->type) {
+	      case EFI_LOADER_CODE:
+	      case EFI_LOADER_DATA:
+	      case EFI_BOOT_SERVICES_CODE:
+	      case EFI_BOOT_SERVICES_DATA:
+	      case EFI_CONVENTIONAL_MEMORY:
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Trim descriptor MD so it starts at address START_ADDR.  If the descriptor covers
+ * memory that is normally available to the kernel, issue a warning that some memory
+ * is being ignored.
+ */
+static void
+trim_bottom (efi_memory_desc_t *md, u64 start_addr)
+{
+	u64 num_skipped_pages;
+
+	if (md->phys_addr >= start_addr || !md->num_pages)
+		return;
+
+	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
+	if (num_skipped_pages > md->num_pages)
+		num_skipped_pages = md->num_pages;
+
+	if (is_available_memory(md))
+		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+		       "at 0x%lx\n", __FUNCTION__,
+		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
+		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
+	/*
+	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
+	 * descriptor list to become unsorted.  In such a case, md->num_pages will be
+	 * zero, so the Right Thing will happen.
+	 */
+	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
+	md->num_pages -= num_skipped_pages;
+}
+
+static void
+trim_top (efi_memory_desc_t *md, u64 end_addr)
+{
+	u64 num_dropped_pages, md_end_addr;
+
+	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+	if (md_end_addr <= end_addr || !md->num_pages)
+		return;
+
+	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
+	if (num_dropped_pages > md->num_pages)
+		num_dropped_pages = md->num_pages;
+
+	if (is_available_memory(md))
+		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+		       "at 0x%lx\n", __FUNCTION__,
+		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
+		       md->phys_addr, end_addr);
+	md->num_pages -= num_dropped_pages;
+}
+
 /*
- * Walks the EFI memory map and calls CALLBACK once for each EFI
- * memory descriptor that has memory that is available for OS use.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory that is available for OS use.
  */
 void
 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
@@ -137,9 +207,9 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 		u64 start;
 		u64 end;
 	} prev, curr;
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size, start, end;
+	void *efi_map_start, *efi_map_end, *p, *q;
+	efi_memory_desc_t *md, *check_md;
+	u64 efi_desc_size, start, end, granule_addr, first_non_wb_addr = 0;
 
 	efi_map_start = __va(ia64_boot_param->efi_memmap);
 	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -147,24 +217,56 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
-		switch (md->type) {
-		      case EFI_LOADER_CODE:
-		      case EFI_LOADER_DATA:
-		      case EFI_BOOT_SERVICES_CODE:
-		      case EFI_BOOT_SERVICES_DATA:
-		      case EFI_CONVENTIONAL_MEMORY:
-			if (!(md->attribute & EFI_MEMORY_WB))
-				continue;
+
+		/* skip over non-WB memory descriptors; that's all we're interested in... */
+		if (!(md->attribute & EFI_MEMORY_WB))
+			continue;
+
+		if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > first_non_wb_addr) {
+			/*
+			 * Search for the next run of contiguous WB memory.  Start search
+			 * at first granule boundary covered by md.
+			 */
+			granule_addr = ((md->phys_addr + IA64_GRANULE_SIZE - 1)
+					& -IA64_GRANULE_SIZE);
+			first_non_wb_addr = granule_addr;
+			for (q = p; q < efi_map_end; q += efi_desc_size) {
+				check_md = q;
+
+				if (check_md->attribute & EFI_MEMORY_WB)
+					trim_bottom(md, granule_addr);
+
+				if (check_md->phys_addr < granule_addr)
+					continue;
+
+				if (!(check_md->attribute & EFI_MEMORY_WB))
+					break;	/* hit a non-WB region; stop search */
+				if (check_md->phys_addr != first_non_wb_addr)
+					break;	/* hit a memory hole; stop search */
+
+				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
+			}
+			/* round it down to the previous granule-boundary: */
+			first_non_wb_addr &= -IA64_GRANULE_SIZE;
+
+			if (!(first_non_wb_addr > granule_addr))
+				continue;	/* couldn't find enough contiguous memory */
+		}
+
+		/* BUG_ON((md->phys_addr >> IA64_GRANULE_SHIFT) < first_non_wb_addr); */
+
+		trim_top(md, first_non_wb_addr);
+
+		if (is_available_memory(md)) {
 			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
 				if (md->phys_addr > mem_limit)
 					continue;
 				md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT;
 			}
-			if (md->num_pages == 0) {
-				printk("efi_memmap_walk: ignoring empty region at 0x%lx",
-				       md->phys_addr);
+
+			if (md->num_pages == 0)
 				continue;
-			}
 
 			curr.start = PAGE_OFFSET + md->phys_addr;
 			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
@@ -187,10 +289,6 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 				prev = curr;
 			}
 		}
-			break;
-
-		      default:
-			continue;
-		}
 	}
 	if (prev_valid) {
@@ -268,8 +366,9 @@ efi_map_pal_code (void)
 		 */
 		psr = ia64_clear_ic();
 		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
-		ia64_set_psr(psr);
+			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
+			 IA64_GRANULE_SHIFT);
+		ia64_set_psr(psr);		/* restore psr */
 		ia64_srlz_i();
 	}
 }
@@ -376,7 +475,7 @@ efi_init (void)
 		md = p;
 		printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
 		       i, md->type, md->attribute, md->phys_addr,
-		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
+		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
 		       md->num_pages >> (20 - EFI_PAGE_SHIFT));
 	}
 }
...
@@ -34,8 +34,8 @@ union init_thread {
 	} s;
 	unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
 } init_thread_union __attribute__((section(".data.init_task"))) = {{
-	task:		INIT_TASK(init_thread_union.s.task),
-	thread_info:	INIT_THREAD_INFO(init_thread_union.s.thread_info)
+	.task =		INIT_TASK(init_thread_union.s.task),
+	.thread_info =	INIT_THREAD_INFO(init_thread_union.s.thread_info)
 }};
 
 asm (".global init_task; init_task = init_thread_union");
...
@@ -88,7 +88,7 @@ static struct {
 
 static struct iosapic_irq {
 	char		*addr;		/* base address of IOSAPIC */
-	unsigned char	base_irq;	/* first irq assigned to this IOSAPIC */
+	unsigned int	base_irq;	/* first irq assigned to this IOSAPIC */
 	char		pin;		/* IOSAPIC pin (-1 => not an IOSAPIC irq) */
 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
 	unsigned char	polarity: 1;	/* interrupt polarity (see iosapic.h) */
@@ -97,9 +97,9 @@ static struct iosapic_irq {
 
 static struct iosapic {
 	char		*addr;		/* base address of IOSAPIC */
-	unsigned char	pcat_compat;	/* 8259 compatibility flag */
-	unsigned char	base_irq;	/* first irq assigned to this IOSAPIC */
+	unsigned int	base_irq;	/* first irq assigned to this IOSAPIC */
 	unsigned short	max_pin;	/* max input pin supported in this IOSAPIC */
+	unsigned char	pcat_compat;	/* 8259 compatibility flag */
 } iosapic_lists[256] __initdata;
 
 static int num_iosapic = 0;
@@ -322,14 +322,14 @@ iosapic_end_level_irq (unsigned int irq)
 #define iosapic_ack_level_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_level = {
-	typename:	"IO-SAPIC-level",
-	startup:	iosapic_startup_level_irq,
-	shutdown:	iosapic_shutdown_level_irq,
-	enable:		iosapic_enable_level_irq,
-	disable:	iosapic_disable_level_irq,
-	ack:		iosapic_ack_level_irq,
-	end:		iosapic_end_level_irq,
-	set_affinity:	iosapic_set_affinity
+	.typename =	"IO-SAPIC-level",
+	.startup =	iosapic_startup_level_irq,
+	.shutdown =	iosapic_shutdown_level_irq,
+	.enable =	iosapic_enable_level_irq,
+	.disable =	iosapic_disable_level_irq,
+	.ack =		iosapic_ack_level_irq,
+	.end =		iosapic_end_level_irq,
+	.set_affinity =	iosapic_set_affinity
 };
 
 /*
@@ -366,14 +366,14 @@ iosapic_ack_edge_irq (unsigned int irq)
 #define iosapic_end_edge_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_edge = {
-	typename:	"IO-SAPIC-edge",
-	startup:	iosapic_startup_edge_irq,
-	shutdown:	iosapic_disable_edge_irq,
-	enable:		iosapic_enable_edge_irq,
-	disable:	iosapic_disable_edge_irq,
-	ack:		iosapic_ack_edge_irq,
-	end:		iosapic_end_edge_irq,
-	set_affinity:	iosapic_set_affinity
+	.typename =	"IO-SAPIC-edge",
+	.startup =	iosapic_startup_edge_irq,
+	.shutdown =	iosapic_disable_edge_irq,
+	.enable =	iosapic_enable_edge_irq,
+	.disable =	iosapic_disable_edge_irq,
+	.ack =		iosapic_ack_edge_irq,
+	.end =		iosapic_end_edge_irq,
+	.set_affinity =	iosapic_set_affinity
 };
 
 unsigned int
unsigned int unsigned int
...@@ -679,11 +679,10 @@ iosapic_init_pci_irq (void) ...@@ -679,11 +679,10 @@ iosapic_init_pci_irq (void)
pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin, pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin,
iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector); iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector);
#endif #endif
/* /*
* Forget not to program the IOSAPIC RTE per ACPI _PRT * NOTE: The IOSAPIC RTE will be programmed in iosapic_pci_fixup(). It
* needs to be done there to ensure PCI hotplug works right.
*/ */
set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
} }
} }
......
@@ -36,6 +36,10 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+#ifdef CONFIG_PERFMON
+# include <asm/perfmon.h>
+#endif
+
 #define IRQ_DEBUG	0
 
 /* default base addr of IPI table */
@@ -144,9 +148,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
 
 static struct irqaction ipi_irqaction = {
-	handler:	handle_IPI,
-	flags:		SA_INTERRUPT,
-	name:		"IPI"
+	.handler =	handle_IPI,
+	.flags =	SA_INTERRUPT,
+	.name =		"IPI"
 };
 #endif
 
@@ -172,6 +176,9 @@ init_IRQ (void)
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
+#endif
+#ifdef CONFIG_PERFMON
+	perfmon_init_percpu();
 #endif
 	platform_irq_init();
 }
...
@@ -27,12 +27,12 @@ lsapic_noop (unsigned int irq)
 }
 
 struct hw_interrupt_type irq_type_ia64_lsapic = {
-	typename:	"LSAPIC",
-	startup:	lsapic_noop_startup,
-	shutdown:	lsapic_noop,
-	enable:		lsapic_noop,
-	disable:	lsapic_noop,
-	ack:		lsapic_noop,
-	end:		lsapic_noop,
-	set_affinity:	(void (*)(unsigned int, unsigned long)) lsapic_noop
+	.typename =	"LSAPIC",
+	.startup =	lsapic_noop_startup,
+	.shutdown =	lsapic_noop,
+	.enable =	lsapic_noop,
+	.disable =	lsapic_noop,
+	.ack =		lsapic_noop,
+	.end =		lsapic_noop,
+	.set_affinity =	(void (*)(unsigned int, unsigned long)) lsapic_noop
 };
...
@@ -11,13 +11,16 @@
 struct ia64_machine_vector ia64_mv;
 
 /*
- * Most platforms use this routine for mapping page frame addresses
- * into a memory map index.
+ * Most platforms use this routine for mapping page frame addresses into a memory map
+ * index.
+ *
+ * Note: we can't use __pa() because map_nr_dense(X) MUST map to something >= max_mapnr if
+ * X is outside the identity mapped kernel space.
  */
 unsigned long
 map_nr_dense (unsigned long addr)
 {
-	return MAP_NR_DENSE(addr);
+	return (addr - PAGE_OFFSET) >> PAGE_SHIFT;
 }
 
 static struct ia64_machine_vector *
...
@@ -82,27 +82,27 @@ extern void ia64_slave_init_handler (void);
 extern struct hw_interrupt_type irq_type_iosapic_level;
 
 static struct irqaction cmci_irqaction = {
-	handler:	ia64_mca_cmc_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"cmc_hndlr"
+	.handler =	ia64_mca_cmc_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cmc_hndlr"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
-	handler:	ia64_mca_rendez_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"mca_rdzv"
+	.handler =	ia64_mca_rendez_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
-	handler:	ia64_mca_wakeup_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"mca_wkup"
+	.handler =	ia64_mca_wakeup_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_wkup"
 };
 
 static struct irqaction mca_cpe_irqaction = {
-	handler:	ia64_mca_cpe_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"cpe_hndlr"
+	.handler =	ia64_mca_cpe_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cpe_hndlr"
 };
 
 /*
...
@@ -684,9 +684,9 @@ ia64_os_mca_tlb_error_check:
 	movl	r3=SAL_GET_STATE_INFO;;
 	DATA_VA_TO_PA(r7);;		// convert to physical address
 	ld8	r8=[r7],8;;		// get pdesc function pointer
-	DATA_VA_TO_PA(r8)		// convert to physical address
+	dep	r8=0,r8,61,3;;		// convert SAL VA to PA
 	ld8	r1=[r7];;		// set new (ia64_sal) gp
-	DATA_VA_TO_PA(r1)		// convert to physical address
+	dep	r1=0,r1,61,3;;		// convert SAL VA to PA
 	mov	b6=r8
 
 	alloc	r5=ar.pfs,8,0,8,0;;	// allocate stack frame for SAL call
...
@@ -265,12 +265,37 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
 int
 pcibios_enable_device (struct pci_dev *dev)
 {
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
 	if (!dev)
 		return -EINVAL;
 
-	/* Not needed, since we enable all devices at startup. */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for (idx=0; idx<6; idx++) {
+		r = &dev->resource[idx];
+		if (!r->start && r->end) {
+			printk(KERN_ERR
+			       "PCI: Device %s not available because of resource collisions\n",
+			       dev->slot_name);
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+	if (dev->resource[PCI_ROM_RESOURCE].start)
+		cmd |= PCI_COMMAND_MEMORY;
+	if (cmd != old_cmd) {
+		printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
 
 	printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
 
 	return 0;
 }
...
@@ -106,6 +106,12 @@
 
 #define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
 
+#ifdef CONFIG_SMP
+#define cpu_is_online(i)	(cpu_online_map & (1UL << i))
+#else
+#define cpu_is_online(i)	(i==0)
+#endif
+
 /*
  * debugging
  */
@@ -277,8 +283,8 @@ typedef struct {
 typedef struct {
 	pfm_pmu_reg_type_t	type;
 	int			pm_pos;
-	int			(*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val);
-	int			(*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val);
+	int			(*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+	int			(*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
 	unsigned long		dep_pmd[4];
 	unsigned long		dep_pmc[4];
 } pfm_reg_desc_t;
@@ -396,7 +402,7 @@ static unsigned long reset_pmcs[IA64_NUM_PMC_REGS];	/* contains PAL reset values
 static void pfm_vm_close(struct vm_area_struct * area);
 
 static struct vm_operations_struct pfm_vm_ops={
-	close: pfm_vm_close
+	.close = pfm_vm_close
 };
 
 /*
@@ -902,8 +908,8 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 		/*
 		 * and it must be a valid CPU
 		 */
-		cpu = ffs(pfx->ctx_cpu_mask);
-		if (!cpu_online(cpu)) {
+		cpu = ffz(~pfx->ctx_cpu_mask);
+		if (cpu_is_online(cpu) == 0) {
 			DBprintk(("CPU%d is not online\n", cpu));
 			return -EINVAL;
 		}
@@ -925,11 +931,12 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 			DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
 			return -EINVAL;
 		}
+#if 0
 		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
 			DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
 			return -EINVAL;
 		}
+#endif
 	}
 	/* probably more to add here */
@@ -968,7 +975,7 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
 	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
 
 		/* at this point, we know there is at least one bit set */
-		cpu = ffs(tmp.ctx_cpu_mask) - 1;
+		cpu = ffz(~tmp.ctx_cpu_mask);
 
 		DBprintk(("requesting CPU%d currently on CPU%d\n",cpu, smp_processor_id()));
/* /*
* execute write checker, if any * execute write checker, if any
*/ */
if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value); if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
abort_mission: abort_mission:
if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL; if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...@@ -1371,7 +1378,7 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun ...@@ -1371,7 +1378,7 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
/* /*
* execute write checker, if any * execute write checker, if any
*/ */
if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value); if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
abort_mission: abort_mission:
if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL; if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...@@ -1394,6 +1401,8 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun ...@@ -1394,6 +1401,8 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
/* keep track of what we use */ /* keep track of what we use */
CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]); CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]);
/* mark this register as used as well */
CTX_USED_PMD(ctx, RDEP(cnum));
/* writes to unimplemented part is ignored, so this is safe */ /* writes to unimplemented part is ignored, so this is safe */
ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val); ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val);
@@ -1438,7 +1447,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 	DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid));
 
 	for (i = 0; i < count; i++, req++) {
-		unsigned long reg_val = ~0UL, ctx_val = ~0UL;
+		unsigned long ctx_val = ~0UL;
 
 		if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
@@ -1462,7 +1471,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 		 */
 		if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()){
 			ia64_srlz_d();
-			val = reg_val = ia64_get_pmd(cnum);
+			val = ia64_get_pmd(cnum);
 			DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val));
 		} else {
 #ifdef CONFIG_SMP
@@ -1484,7 +1493,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 			}
 #endif
 			/* context has been saved */
-			val = reg_val = th->pmd[cnum];
+			val = th->pmd[cnum];
 		}
 		if (PMD_IS_COUNTING(cnum)) {
 			/*
@@ -1493,8 +1502,6 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 			val &= pmu_conf.perf_ovfl_val;
 			val += ctx_val = ctx->ctx_soft_pmds[cnum].val;
-		} else {
-			val = reg_val = ia64_get_pmd(cnum);
 		}
 
 		tmp.reg_value = val;
@@ -1503,14 +1510,13 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 		 * execute read checker, if any
 		 */
 		if (PMD_RD_FUNC(cnum)) {
-			ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value);
+			ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
 		}
 
 		PFM_REG_RETFLAG_SET(tmp.reg_flags, ret);
 
-		DBprintk(("read pmd[%u] ret=%d soft_pmd=0x%lx reg=0x%lx pmc=0x%lx\n",
-			  cnum, ret, ctx_val, reg_val,
-			  ia64_get_pmc(cnum)));
+		DBprintk(("read pmd[%u] ret=%d value=0x%lx pmc=0x%lx\n",
+			  cnum, ret, val, ia64_get_pmc(cnum)));
 
 		if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;
 	}
@@ -1553,15 +1559,11 @@ pfm_use_debug_registers(struct task_struct *task)
 	 */
 	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
 
-	/*
-	 * XXX: not pretty
-	 */
 	LOCK_PFS();
 
 	/*
-	 * We only allow the use of debug registers when there is no system
-	 * wide monitoring
-	 * XXX: we could relax this by
+	 * We cannot allow setting breakpoints when system wide monitoring
+	 * sessions are using the debug registers.
 	 */
 	if (pfm_sessions.pfs_sys_use_dbregs> 0)
 		ret = -1;
@@ -1921,7 +1923,6 @@ typedef union {
 	dbr_mask_reg_t	dbr;
 } dbreg_t;
 
-
 static int
 pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs)
 {
@@ -1963,8 +1964,8 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
 	if (ctx->ctx_fl_system) {
 		/* we mark ourselves as owner of the debug registers */
 		ctx->ctx_fl_using_dbreg = 1;
-	} else {
-		if (ctx->ctx_fl_using_dbreg == 0) {
+		DBprintk(("system-wide setting fl_using_dbreg for [%d]\n", task->pid));
+	} else if (first_time) {
 			ret= -EBUSY;
 			if ((thread->flags & IA64_THREAD_DBG_VALID) != 0) {
 				DBprintk(("debug registers already in use for [%d]\n", task->pid));
@@ -1973,6 +1974,7 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
 
 			/* we mark ourselves as owner of the debug registers */
 			ctx->ctx_fl_using_dbreg = 1;
+			DBprintk(("setting fl_using_dbreg for [%d]\n", task->pid));
 
 			/*
 			 * Given debug registers cannot be used for both debugging
 			 * and performance monitoring at the same time, we reuse
@@ -1980,10 +1982,18 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
 			 */
 			memset(task->thread.dbr, 0, sizeof(task->thread.dbr));
 			memset(task->thread.ibr, 0, sizeof(task->thread.ibr));
+	}
+
+	if (first_time) {
+		DBprintk(("[%d] clearing ibrs,dbrs\n", task->pid));
 		/*
 		 * clear hardware registers to make sure we don't
-		 * pick up stale state
+		 * pick up stale state.
+		 *
+		 * for a system wide session, we do not use
+		 * thread.dbr, thread.ibr because this process
+		 * never leaves the current CPU and the state
+		 * is shared by all processes running on it
 		 */
 		for (i=0; i < pmu_conf.num_ibrs; i++) {
 			ia64_set_ibr(i, 0UL);
@@ -1994,7 +2004,6 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
 		}
 		ia64_srlz_d();
 	}
-	}
 
 	ret = -EFAULT;
@@ -2361,9 +2370,9 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
 {
 	struct pt_regs *regs = (struct pt_regs *)&stack;
 	struct task_struct *task = current;
-	pfm_context_t *ctx = task->thread.pfm_context;
+	pfm_context_t *ctx;
 	size_t sz;
-	int ret = -ESRCH, narg;
+	int ret, narg;
 
 	/*
 	 * reject any call if perfmon was disabled at initialization time
@@ -2393,6 +2402,8 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
 
 	if (pid != current->pid) {
 
+		ret = -ESRCH;
+
 		read_lock(&tasklist_lock);
 
 		task = find_task_by_pid(pid);
@@ -2407,10 +2418,11 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
 			ret = check_task_state(task);
 			if (ret != 0) goto abort_call;
 		}
-		ctx = task->thread.pfm_context;
 	}
 	}
+
+	ctx = task->thread.pfm_context;
 
 	if (PFM_CMD_USE_CTX(cmd)) {
 		ret = -EINVAL;
 		if (ctx == NULL) {
@@ -2953,11 +2965,6 @@ perfmon_interrupt (int irq, void *arg, struct pt_regs *regs)
 static int
 perfmon_proc_info(char *page)
 {
-#ifdef CONFIG_SMP
-#define cpu_is_online(i) (cpu_online_map & (1UL << i))
-#else
-#define cpu_is_online(i) 1
-#endif
 	char *p = page;
 	int i;
} }
static struct irqaction perfmon_irqaction = { static struct irqaction perfmon_irqaction = {
handler: perfmon_interrupt, .handler = perfmon_interrupt,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "perfmon" .name = "perfmon"
}; };
@@ -4150,11 +4157,6 @@ perfmon_init (void)
 	pal_perf_mon_info_u_t pm_info;
 	s64 status;
 
-	register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
-
-	ia64_set_pmv(IA64_PERFMON_VECTOR);
-	ia64_srlz_d();
-
 	pmu_conf.pfm_is_disabled = 1;
 
 	printk("perfmon: version %u.%u (sampling format v%u.%u) IRQ %u\n",
@@ -4232,6 +4234,9 @@ __initcall(perfmon_init);
 void
 perfmon_init_percpu (void)
 {
+	if (smp_processor_id() == 0)
+		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+
 	ia64_set_pmv(IA64_PERFMON_VECTOR);
 	ia64_srlz_d();
 }
...
/*
* This file contains the Itanium PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_ITANIUM
#error "This file is only valid when CONFIG_ITANIUM is defined"
#endif
static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pmc_desc[256]={
/* pmc0 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR, 6, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG, 0, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[256]={
/* pmd0 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static int
pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
pfm_context_t *ctx = task->thread.pfm_context;
int ret;
/*
* we must clear the (instruction) debug registers if pmc13.ta bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 13 && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
if (ret) return ret;
}
/*
* we must clear the (data) debug registers if pmc11.pt bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 11 && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
if (ret) return ret;
}
return 0;
}
...
/*
* This file contains the McKinley PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_MCKINLEY
#error "This file is only valid when CONFIG_MCKINLEY is defined"
#endif
static int pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pmc_desc[256]={
/* pmc0 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG, 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR, 4, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG, 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_CONFIG, 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[256]={
/* pmd0 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
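(Aside: the two four-word arrays in each descriptor above are dependency bitmaps built from RDEP(x) = 1UL<<x — first the related PMDs a register is grouped with, then the related PMCs; note how pmd8-pmd16 all point back at pmc12, which controls the branch trace buffer. A minimal stand-alone sketch of how such a mask decodes; only the RDEP definition is taken from the file, the rest is illustrative.)

#include <stdio.h>

#define RDEP(x) (1UL << (x))

int main(void)
{
	/* pmd8's dep_pmd word from the table above: grouped with pmd9..pmd16 */
	unsigned long dep_pmd = RDEP(9) | RDEP(10) | RDEP(11) | RDEP(12)
			      | RDEP(13) | RDEP(14) | RDEP(15) | RDEP(16);
	int i;

	for (i = 0; i < 64; i++)
		if (dep_pmd & RDEP(i))
			printf("pmd8 is grouped with pmd%d\n", i);
	return 0;
}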
static int
pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
struct thread_struct *th = &task->thread;
pfm_context_t *ctx = task->thread.pfm_context;
int ret = 0, check_case1 = 0;
unsigned long val8 = 0, val14 = 0, val13 = 0;
/*
 * if any pmc13.ena_dbrpX bit is set and the debug registers have not been
 * written yet (fl_using_dbreg==0), we must clear them to avoid picking up
 * stale information.
 */
if (cnum == 13 && (*val & (0xfUL << 45)) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
if (ret) return ret;
}
/*
 * if any pmc14.ibrpX bit is enabled and the (instruction) debug registers
 * have not been written yet (fl_using_dbreg==0), we must clear them to avoid
 * picking up stale information.
 */
if (cnum == 14 && ((*val & 0x2222) != 0x2222) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
if (ret) return ret;
}
switch(cnum) {
case 4: *val |= 1UL << 23; /* force power enable bit */
break;
case 8: val8 = *val;
val13 = th->pmc[13];
val14 = th->pmc[14];
check_case1 = 1;
break;
case 13: val8 = th->pmc[8];
val13 = *val;
val14 = th->pmc[14];
check_case1 = 1;
break;
case 14: val8 = th->pmc[8];
val13 = th->pmc[13];
val14 = *val;
check_case1 = 1;
break;
}
/* check illegal configuration which can produce inconsistencies in tagging
* i-side events in L1D and L2 caches
*/
if (check_case1) {
ret = ((val13 >> 45) & 0xf) == 0
   && ((val8 & 0x1) == 0)
   && ((((val14 >> 1) & 0x3) == 0x2 || ((val14 >> 1) & 0x3) == 0x0)
       || (((val14 >> 4) & 0x3) == 0x2 || ((val14 >> 4) & 0x3) == 0x0));
if (ret) printk(KERN_DEBUG "perfmon: failure check_case1\n");
}
return ret ? -EINVAL : 0;
}
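(For clarity: check_case1 rejects exactly one pattern — all four pmc13.ena_dbrp bits (bits 45-48) clear, pmc8 bit 0 clear, and either pmc14 ibrp field (bits 1-2 or bits 4-5) programmed to 0x0 or 0x2. A stand-alone restatement of the predicate for sanity-checking candidate values outside the kernel; mck_check_case1 and the harness are invented for illustration.)

#include <stdio.h>

/* mirrors the check_case1 test in pfm_mck_pmc_check() above */
static int mck_check_case1(unsigned long val8, unsigned long val13,
			   unsigned long val14)
{
	return ((val13 >> 45) & 0xf) == 0
	    && ((val8 & 0x1) == 0)
	    && ((((val14 >> 1) & 0x3) == 0x2 || ((val14 >> 1) & 0x3) == 0x0)
	        || (((val14 >> 4) & 0x3) == 0x2 || ((val14 >> 4) & 0x3) == 0x0));
}

int main(void)
{
	printf("%d\n", mck_check_case1(0x0, 0x0, 0x0)); /* 1: illegal, rejected */
	printf("%d\n", mck_check_case1(0x1, 0x0, 0x0)); /* 0: legal, accepted */
	return 0;
}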
...@@ -325,6 +325,11 @@ copy_thread (int nr, unsigned long clone_flags, ...@@ -325,6 +325,11 @@ copy_thread (int nr, unsigned long clone_flags,
/* copy parts of thread_struct: */ /* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16; p->thread.ksp = (unsigned long) child_stack - 16;
/* stop some PSR bits from being inherited: */
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~IA64_PSR_BITS_TO_CLEAR);
/* /*
* NOTE: The calling convention considers all floating point * NOTE: The calling convention considers all floating point
* registers in the high partition (fph) to be scratch. Since * registers in the high partition (fph) to be scratch. Since
......
...@@ -455,10 +455,10 @@ c_stop (struct seq_file *m, void *v) ...@@ -455,10 +455,10 @@ c_stop (struct seq_file *m, void *v)
} }
struct seq_operations cpuinfo_op = { struct seq_operations cpuinfo_op = {
start: c_start, .start = c_start,
next: c_next, .next = c_next,
stop: c_stop, .stop = c_stop,
show: show_cpuinfo .show = show_cpuinfo
}; };
void void
...@@ -542,7 +542,18 @@ cpu_init (void) ...@@ -542,7 +542,18 @@ cpu_init (void)
extern char __per_cpu_end[]; extern char __per_cpu_end[];
int cpu = smp_processor_id(); int cpu = smp_processor_id();
if (__per_cpu_end - __per_cpu_start > PAGE_SIZE)
panic("Per-cpu data area too big! (%Zu > %Zu)",
__per_cpu_end - __per_cpu_start, PAGE_SIZE);
/*
* On the BSP, the page allocator isn't initialized by the time we get here. On
* the APs, the bootmem allocator is no longer available...
*/
if (cpu == 0)
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start); my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
else
my_cpu_data = (void *) get_free_page(GFP_KERNEL);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start); memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start; __per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start); my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
......
...@@ -146,6 +146,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) ...@@ -146,6 +146,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
if (from->si_code < 0) { if (from->si_code < 0) {
if (__copy_to_user(to, from, sizeof(siginfo_t))) if (__copy_to_user(to, from, sizeof(siginfo_t)))
return -EFAULT; return -EFAULT;
return 0;
} else { } else {
int err; int err;
......
...@@ -425,7 +425,7 @@ do_boot_cpu (int sapicid) ...@@ -425,7 +425,7 @@ do_boot_cpu (int sapicid)
task_for_booting_cpu = idle; task_for_booting_cpu = idle;
Dprintk("Sending wakeup vector %u to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
...@@ -537,7 +537,7 @@ smp_boot_cpus (void) ...@@ -537,7 +537,7 @@ smp_boot_cpus (void)
printk("Before bogomips.\n"); printk("Before bogomips.\n");
if (!cpucount) { if (!cpucount) {
printk(KERN_ERR "Error: only one processor found.\n"); printk(KERN_WARNING "Warning: only one processor found.\n");
} else { } else {
unsigned long bogosum = 0; unsigned long bogosum = 0;
for (cpu = 0; cpu < NR_CPUS; cpu++) for (cpu = 0; cpu < NR_CPUS; cpu++)
......
...@@ -41,10 +41,12 @@ do_profile (unsigned long ip) ...@@ -41,10 +41,12 @@ do_profile (unsigned long ip)
extern unsigned long prof_cpu_mask; extern unsigned long prof_cpu_mask;
extern char _stext; extern char _stext;
if (!prof_buffer)
return;
if (!((1UL << smp_processor_id()) & prof_cpu_mask)) if (!((1UL << smp_processor_id()) & prof_cpu_mask))
return; return;
if (prof_buffer && current->pid) {
ip -= (unsigned long) &_stext; ip -= (unsigned long) &_stext;
ip >>= prof_shift; ip >>= prof_shift;
/* /*
...@@ -55,7 +57,6 @@ do_profile (unsigned long ip) ...@@ -55,7 +57,6 @@ do_profile (unsigned long ip)
ip = prof_len - 1; ip = prof_len - 1;
atomic_inc((atomic_t *) &prof_buffer[ip]); atomic_inc((atomic_t *) &prof_buffer[ip]);
}
} }
/* /*
...@@ -285,9 +286,9 @@ ia64_init_itm (void) ...@@ -285,9 +286,9 @@ ia64_init_itm (void)
} }
static struct irqaction timer_irqaction = { static struct irqaction timer_irqaction = {
handler: timer_interrupt, .handler = timer_interrupt,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "timer" .name = "timer"
}; };
void __init void __init
......
...@@ -93,9 +93,9 @@ die (const char *str, struct pt_regs *regs, long err) ...@@ -93,9 +93,9 @@ die (const char *str, struct pt_regs *regs, long err)
int lock_owner; int lock_owner;
int lock_owner_depth; int lock_owner_depth;
} die = { } die = {
lock: SPIN_LOCK_UNLOCKED, .lock = SPIN_LOCK_UNLOCKED,
lock_owner: -1, .lock_owner = -1,
lock_owner_depth: 0 .lock_owner_depth = 0
}; };
if (die.lock_owner != smp_processor_id()) { if (die.lock_owner != smp_processor_id()) {
...@@ -435,7 +435,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -435,7 +435,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
unsigned long code, error = isr; unsigned long code, error = isr;
struct siginfo siginfo; struct siginfo siginfo;
char buf[128]; char buf[128];
int result; int result, sig;
static const char *reason[] = { static const char *reason[] = {
"IA-64 Illegal Operation fault", "IA-64 Illegal Operation fault",
"IA-64 Privileged Operation fault", "IA-64 Privileged Operation fault",
...@@ -479,6 +479,30 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -479,6 +479,30 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
break; break;
case 26: /* NaT Consumption */ case 26: /* NaT Consumption */
if (user_mode(regs)) {
if (((isr >> 4) & 0xf) == 2) {
/* NaT page consumption */
sig = SIGSEGV;
code = SEGV_ACCERR;
} else {
/* register NaT consumption */
sig = SIGILL;
code = ILL_ILLOPN;
}
siginfo.si_signo = sig;
siginfo.si_code = code;
siginfo.si_errno = 0;
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = vector;
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
force_sig_info(sig, &siginfo, current);
return;
} else if (done_with_exception(regs))
return;
sprintf(buf, "NaT consumption");
break;
case 31: /* Unsupported Data Reference */ case 31: /* Unsupported Data Reference */
if (user_mode(regs)) { if (user_mode(regs)) {
siginfo.si_signo = SIGILL; siginfo.si_signo = SIGILL;
...@@ -491,7 +515,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -491,7 +515,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
force_sig_info(SIGILL, &siginfo, current); force_sig_info(SIGILL, &siginfo, current);
return; return;
} }
sprintf(buf, (vector == 26) ? "NaT consumption" : "Unsupported data reference"); sprintf(buf, "Unsupported data reference");
break; break;
case 29: /* Debug */ case 29: /* Debug */
...@@ -508,16 +532,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -508,16 +532,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
if (ia64_psr(regs)->is == 0) if (ia64_psr(regs)->is == 0)
ifa = regs->cr_iip; ifa = regs->cr_iip;
#endif #endif
siginfo.si_addr = (void *) ifa;
break; break;
case 35: siginfo.si_code = TRAP_BRANCH; break; case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
case 36: siginfo.si_code = TRAP_TRACE; break; case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
} }
siginfo.si_signo = SIGTRAP; siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0; siginfo.si_errno = 0;
siginfo.si_flags = 0; siginfo.si_flags = 0;
siginfo.si_isr = 0; siginfo.si_isr = 0;
siginfo.si_addr = 0; siginfo.si_addr = (void *) ifa;
siginfo.si_imm = 0; siginfo.si_imm = 0;
force_sig_info(SIGTRAP, &siginfo, current); force_sig_info(SIGTRAP, &siginfo, current);
return; return;
......
...@@ -140,13 +140,13 @@ static struct { ...@@ -140,13 +140,13 @@ static struct {
} stat; } stat;
# endif # endif
} unw = { } unw = {
tables: &unw.kernel_table, .tables = &unw.kernel_table,
lock: SPIN_LOCK_UNLOCKED, .lock = SPIN_LOCK_UNLOCKED,
save_order: { .save_order = {
UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
}, },
preg_index: { .preg_index = {
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */ struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */ struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
struct_offset(struct unw_frame_info, bsp_loc)/8, struct_offset(struct unw_frame_info, bsp_loc)/8,
...@@ -189,9 +189,9 @@ static struct { ...@@ -189,9 +189,9 @@ static struct {
struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8, struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8, struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
}, },
hash : { [0 ... UNW_HASH_SIZE - 1] = -1 }, .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
#if UNW_DEBUG #if UNW_DEBUG
preg_name: { .preg_name = {
"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp", "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
"r4", "r5", "r6", "r7", "r4", "r5", "r6", "r7",
"ar.unat", "pr", "ar.lc", "ar.fpsr", "ar.unat", "pr", "ar.lc", "ar.fpsr",
...@@ -634,8 +634,8 @@ alloc_spill_area (unsigned long *offp, unsigned long regsize, ...@@ -634,8 +634,8 @@ alloc_spill_area (unsigned long *offp, unsigned long regsize,
for (reg = hi; reg >= lo; --reg) { for (reg = hi; reg >= lo; --reg) {
if (reg->where == UNW_WHERE_SPILL_HOME) { if (reg->where == UNW_WHERE_SPILL_HOME) {
reg->where = UNW_WHERE_PSPREL; reg->where = UNW_WHERE_PSPREL;
reg->val = 0x10 - *offp; *offp -= regsize;
*offp += regsize; reg->val = *offp;
} }
} }
} }
...@@ -814,7 +814,8 @@ desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *s ...@@ -814,7 +814,8 @@ desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *s
} }
for (i = 0; i < 20; ++i) { for (i = 0; i < 20; ++i) {
if ((frmask & 1) != 0) { if ((frmask & 1) != 0) {
set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
sr->region_start + sr->region_len - 1, 0); sr->region_start + sr->region_len - 1, 0);
sr->any_spills = 1; sr->any_spills = 1;
} }
......
...@@ -9,12 +9,12 @@ export-objs := io.o swiotlb.o ...@@ -9,12 +9,12 @@ export-objs := io.o swiotlb.o
obj-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ obj-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
checksum.o clear_page.o csum_partial_copy.o copy_page.o \ checksum.o clear_page.o csum_partial_copy.o copy_page.o \
copy_user.o clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o io.o ip_fast_csum.o do_csum.o \ flush.o io.o ip_fast_csum.o do_csum.o \
memcpy.o memset.o strlen.o swiotlb.o memset.o strlen.o swiotlb.o
obj-$(CONFIG_ITANIUM) += copy_page.o obj-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
obj-$(CONFIG_MCKINLEY) += copy_page_mck.o obj-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
IGNORE_FLAGS_OBJS = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ IGNORE_FLAGS_OBJS = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
......
...@@ -237,15 +237,17 @@ GLOBAL_ENTRY(__copy_user) ...@@ -237,15 +237,17 @@ GLOBAL_ENTRY(__copy_user)
.copy_user_bit##rshift: \ .copy_user_bit##rshift: \
1: \ 1: \
EX(.failure_out,(EPI) st8 [dst1]=tmp,8); \ EX(.failure_out,(EPI) st8 [dst1]=tmp,8); \
(EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift; \ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \
EX(3f,(p16) ld8 val1[0]=[src1],8); \ EX(3f,(p16) ld8 val1[1]=[src1],8); \
(p16) mov val1[0]=r0; \
br.ctop.dptk 1b; \ br.ctop.dptk 1b; \
;; \ ;; \
br.cond.sptk.many .diff_align_do_tail; \ br.cond.sptk.many .diff_align_do_tail; \
2: \ 2: \
(EPI) st8 [dst1]=tmp,8; \ (EPI) st8 [dst1]=tmp,8; \
(EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift; \ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \
3: \ 3: \
(p16) mov val1[1]=r0; \
(p16) mov val1[0]=r0; \ (p16) mov val1[0]=r0; \
br.ctop.dptk 2b; \ br.ctop.dptk 2b; \
;; \ ;; \
......
...@@ -87,6 +87,12 @@ ia64_outl (unsigned int val, unsigned long port) ...@@ -87,6 +87,12 @@ ia64_outl (unsigned int val, unsigned long port)
__ia64_outl(val, port); __ia64_outl(val, port);
} }
void
ia64_mmiob (void)
{
__ia64_mmiob();
}
/* define aliases: */ /* define aliases: */
asm (".global __ia64_inb, __ia64_inw, __ia64_inl"); asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
...@@ -99,4 +105,7 @@ asm ("__ia64_outb = ia64_outb"); ...@@ -99,4 +105,7 @@ asm ("__ia64_outb = ia64_outb");
asm ("__ia64_outw = ia64_outw"); asm ("__ia64_outw = ia64_outw");
asm ("__ia64_outl = ia64_outl"); asm ("__ia64_outl = ia64_outl");
asm (".global __ia64_mmiob");
asm ("__ia64_mmiob = ia64_mmiob");
#endif /* CONFIG_IA64_GENERIC */ #endif /* CONFIG_IA64_GENERIC */
...@@ -415,18 +415,20 @@ int ...@@ -415,18 +415,20 @@ int
swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{ {
void *addr; void *addr;
unsigned long pci_addr;
int i; int i;
if (direction == PCI_DMA_NONE) if (direction == PCI_DMA_NONE)
BUG(); BUG();
for (i = 0; i < nelems; i++, sg++) { for (i = 0; i < nelems; i++, sg++) {
sg->orig_address = SG_ENT_VIRT_ADDRESS(sg); addr = SG_ENT_VIRT_ADDRESS(sg);
if ((SG_ENT_PHYS_ADDRESS(sg) & ~hwdev->dma_mask) != 0) { pci_addr = virt_to_phys(addr);
addr = map_single(hwdev, sg->orig_address, sg->length, direction); if ((pci_addr & ~hwdev->dma_mask) != 0)
sg->page = virt_to_page(addr); sg->dma_address = map_single(hwdev, addr, sg->length, direction);
sg->offset = (u64) addr & ~PAGE_MASK; else
} sg->dma_address = pci_addr;
sg->dma_length = sg->length;
} }
return nelems; return nelems;
} }
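(With the reworked mapping above, a driver reads each entry's bus address and length through sg_dma_address() and sg_dma_len() — the latter now expands to sg->dma_length on ia64, per the asm/pci.h hunk further down — instead of deriving them from the page pointer. A hedged sketch of the consuming side; struct my_desc and fill_ring() are invented for illustration.)

#include <linux/pci.h>

/* hypothetical DMA descriptor ring entry, for illustration only */
struct my_desc {
	u64 busaddr;
	u32 len;
};

/* program descriptors from a scatterlist already mapped with pci_map_sg() */
static void fill_ring(struct my_desc *desc, struct scatterlist *sg, int nelems)
{
	int i;

	for (i = 0; i < nelems; i++, sg++) {
		desc[i].busaddr = sg_dma_address(sg);	/* sg->dma_address */
		desc[i].len = sg_dma_len(sg);		/* sg->dma_length */
	}
}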
...@@ -444,12 +446,10 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int ...@@ -444,12 +446,10 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
BUG(); BUG();
for (i = 0; i < nelems; i++, sg++) for (i = 0; i < nelems; i++, sg++)
if (sg->orig_address != SG_ENT_VIRT_ADDRESS(sg)) { if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
unmap_single(hwdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, direction); unmap_single(hwdev, sg->dma_address, sg->dma_length, direction);
sg->page = virt_to_page(sg->orig_address); else if (direction == PCI_DMA_FROMDEVICE)
sg->offset = (u64) sg->orig_address & ~PAGE_MASK; mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
} else if (direction == PCI_DMA_FROMDEVICE)
mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->length);
} }
/* /*
...@@ -468,14 +468,14 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int ...@@ -468,14 +468,14 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
BUG(); BUG();
for (i = 0; i < nelems; i++, sg++) for (i = 0; i < nelems; i++, sg++)
if (sg->orig_address != SG_ENT_VIRT_ADDRESS(sg)) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
sync_single(hwdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, direction); sync_single(hwdev, sg->dma_address, sg->dma_length, direction);
} }
unsigned long unsigned long
swiotlb_dma_address (struct scatterlist *sg) swiotlb_dma_address (struct scatterlist *sg)
{ {
return SG_ENT_PHYS_ADDRESS(sg); return sg->dma_address;
} }
/* /*
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/personality.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/swap.h> #include <linux/swap.h>
...@@ -68,10 +69,9 @@ ia64_init_addr_space (void) ...@@ -68,10 +69,9 @@ ia64_init_addr_space (void)
struct vm_area_struct *vma; struct vm_area_struct *vma;
/* /*
* If we're out of memory and kmem_cache_alloc() returns NULL, * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
* we simply ignore the problem. When the process attempts to * the problem. When the process attempts to write to the register backing store
* write to the register backing store for the first time, it * for the first time, it will get a SEGFAULT in this case.
* will get a SEGFAULT in this case.
*/ */
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma) { if (vma) {
...@@ -86,6 +86,19 @@ ia64_init_addr_space (void) ...@@ -86,6 +86,19 @@ ia64_init_addr_space (void)
vma->vm_private_data = NULL; vma->vm_private_data = NULL;
insert_vm_struct(current->mm, vma); insert_vm_struct(current->mm, vma);
} }
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma) {
memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
insert_vm_struct(current->mm, vma);
}
}
} }
void void
......
...@@ -35,10 +35,10 @@ ...@@ -35,10 +35,10 @@
1 << _PAGE_SIZE_4K ) 1 << _PAGE_SIZE_4K )
struct ia64_ctx ia64_ctx = { struct ia64_ctx ia64_ctx = {
lock: SPIN_LOCK_UNLOCKED, .lock = SPIN_LOCK_UNLOCKED,
next: 1, .next = 1,
limit: (1 << 15) - 1, /* start out with the safe (architected) limit */ .limit = (1 << 15) - 1, /* start out with the safe (architected) limit */
max_ctx: ~0U .max_ctx = ~0U
}; };
/* /*
...@@ -49,6 +49,7 @@ wrap_mmu_context (struct mm_struct *mm) ...@@ -49,6 +49,7 @@ wrap_mmu_context (struct mm_struct *mm)
{ {
unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx; unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
struct task_struct *tsk; struct task_struct *tsk;
int i;
if (ia64_ctx.next > max_ctx) if (ia64_ctx.next > max_ctx)
ia64_ctx.next = 300; /* skip daemons */ ia64_ctx.next = 300; /* skip daemons */
...@@ -77,7 +78,11 @@ wrap_mmu_context (struct mm_struct *mm) ...@@ -77,7 +78,11 @@ wrap_mmu_context (struct mm_struct *mm)
ia64_ctx.limit = tsk_context; ia64_ctx.limit = tsk_context;
} }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
flush_tlb_all(); /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
for (i = 0; i < smp_num_cpus; ++i)
if (i != smp_processor_id())
per_cpu(ia64_need_tlb_flush, i) = 1;
__flush_tlb_all();
} }
void void
......
...@@ -279,9 +279,9 @@ static int ifconfig_net_ioctl(struct inode * inode, struct file * file, ...@@ -279,9 +279,9 @@ static int ifconfig_net_ioctl(struct inode * inode, struct file * file,
} }
struct file_operations ifconfig_net_fops = { struct file_operations ifconfig_net_fops = {
ioctl:ifconfig_net_ioctl, /* ioctl */ .ioctl =ifconfig_net_ioctl, /* ioctl */
open:ifconfig_net_open, /* open */ .open =ifconfig_net_open, /* open */
release:ifconfig_net_close /* release */ .release =ifconfig_net_close /* release */
}; };
......
...@@ -210,31 +210,31 @@ static void dump_allocations(struct list_head * dalp); ...@@ -210,31 +210,31 @@ static void dump_allocations(struct list_head * dalp);
/* file operations for each type of node */ /* file operations for each type of node */
static struct file_operations rom_fops = { static struct file_operations rom_fops = {
owner: THIS_MODULE, .owner = THIS_MODULE,
mmap: rom_mmap, .mmap = rom_mmap,
open: generic_open, .open = generic_open,
release: rom_release .release = rom_release
}; };
static struct file_operations base_fops = { static struct file_operations base_fops = {
owner: THIS_MODULE, .owner = THIS_MODULE,
mmap: base_mmap, .mmap = base_mmap,
open: generic_open .open = generic_open
}; };
static struct file_operations config_fops = { static struct file_operations config_fops = {
owner: THIS_MODULE, .owner = THIS_MODULE,
ioctl: config_ioctl, .ioctl = config_ioctl,
open: generic_open .open = generic_open
}; };
static struct file_operations dma_fops = { static struct file_operations dma_fops = {
owner: THIS_MODULE, .owner = THIS_MODULE,
ioctl: dma_ioctl, .ioctl = dma_ioctl,
mmap: dma_mmap, .mmap = dma_mmap,
open: generic_open .open = generic_open
}; };
......
...@@ -24,7 +24,7 @@ extern void hubni_error_handler(char *, int); /* huberror.c */ ...@@ -24,7 +24,7 @@ extern void hubni_error_handler(char *, int); /* huberror.c */
static int hubstats_ioctl(struct inode *, struct file *, unsigned int, unsigned long); static int hubstats_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
struct file_operations hub_mon_fops = { struct file_operations hub_mon_fops = {
ioctl: hubstats_ioctl, .ioctl = hubstats_ioctl,
}; };
#define HUB_CAPTURE_TICKS (2 * HZ) #define HUB_CAPTURE_TICKS (2 * HZ)
......
...@@ -307,22 +307,22 @@ extern void free_pciio_dmamap(pcibr_dmamap_t); ...@@ -307,22 +307,22 @@ extern void free_pciio_dmamap(pcibr_dmamap_t);
* appropriate function name below. * appropriate function name below.
*/ */
struct file_operations pcibr_fops = { struct file_operations pcibr_fops = {
owner: THIS_MODULE, .owner = THIS_MODULE,
llseek: NULL, .llseek = NULL,
read: NULL, .read = NULL,
write: NULL, .write = NULL,
readdir: NULL, .readdir = NULL,
poll: NULL, .poll = NULL,
ioctl: NULL, .ioctl = NULL,
mmap: NULL, .mmap = NULL,
open: NULL, .open = NULL,
flush: NULL, .flush = NULL,
release: NULL, .release = NULL,
fsync: NULL, .fsync = NULL,
fasync: NULL, .fasync = NULL,
lock: NULL, .lock = NULL,
readv: NULL, .readv = NULL,
writev: NULL .writev = NULL
}; };
extern devfs_handle_t hwgraph_root; extern devfs_handle_t hwgraph_root;
......
...@@ -64,22 +64,22 @@ int pcibr_devflag = D_MP; ...@@ -64,22 +64,22 @@ int pcibr_devflag = D_MP;
* appropriate function name below. * appropriate function name below.
*/ */
struct file_operations pcibr_fops = { struct file_operations pcibr_fops = {
owner: THIS_MODULE, .owner =THIS_MODULE,
llseek: NULL, .llseek = NULL,
read: NULL, .read = NULL,
write: NULL, .write = NULL,
readdir: NULL, .readdir = NULL,
poll: NULL, .poll = NULL,
ioctl: NULL, .ioctl = NULL,
mmap: NULL, .mmap = NULL,
open: NULL, .open = NULL,
flush: NULL, .flush = NULL,
release: NULL, .release = NULL,
fsync: NULL, .fsync = NULL,
fasync: NULL, .fasync = NULL,
lock: NULL, .lock = NULL,
readv: NULL, .readv = NULL,
writev: NULL .writev = NULL
}; };
#ifdef LATER #ifdef LATER
......
...@@ -109,14 +109,14 @@ irqpda_t *irqpdaindr[NR_CPUS]; ...@@ -109,14 +109,14 @@ irqpda_t *irqpdaindr[NR_CPUS];
* VGA color display. * VGA color display.
*/ */
struct screen_info sn1_screen_info = { struct screen_info sn1_screen_info = {
orig_x: 0, .orig_x = 0,
orig_y: 0, .orig_y = 0,
orig_video_mode: 3, .orig_video_mode = 3,
orig_video_cols: 80, .orig_video_cols = 80,
orig_video_ega_bx: 3, .orig_video_ega_bx = 3,
orig_video_lines: 25, .orig_video_lines = 25,
orig_video_isVGA: 1, .orig_video_isVGA = 1,
orig_video_points: 16 .orig_video_points = 16
}; };
/* /*
...@@ -170,9 +170,9 @@ early_sn1_setup(void) ...@@ -170,9 +170,9 @@ early_sn1_setup(void)
#ifdef NOT_YET_CONFIG_IA64_MCA #ifdef NOT_YET_CONFIG_IA64_MCA
extern void ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs); extern void ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs);
static struct irqaction mca_cpe_irqaction = { static struct irqaction mca_cpe_irqaction = {
handler: ia64_mca_cpe_int_handler, .handler = ia64_mca_cpe_int_handler,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "cpe_hndlr" .name = "cpe_hndlr"
}; };
#endif #endif
#ifdef CONFIG_IA64_MCA #ifdef CONFIG_IA64_MCA
......
...@@ -326,7 +326,7 @@ ia64_fls (unsigned long x) ...@@ -326,7 +326,7 @@ ia64_fls (unsigned long x)
return exp - 0xffff; return exp - 0xffff;
} }
static int static inline int
fls (int x) fls (int x)
{ {
return ia64_fls((unsigned int) x); return ia64_fls((unsigned int) x);
......
...@@ -53,7 +53,7 @@ ia64_get_itc (void) ...@@ -53,7 +53,7 @@ ia64_get_itc (void)
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#ifdef CONFIG_ITANIUM #ifdef CONFIG_ITANIUM
while (unlikely ((__s32) result == -1) while (unlikely((__s32) result == -1))
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#endif #endif
return result; return result;
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#define KEYBOARD_IRQ isa_irq_to_vector(1) #define KEYBOARD_IRQ isa_irq_to_vector(1)
#define DISABLE_KBD_DURING_INTERRUPTS 0 #define DISABLE_KBD_DURING_INTERRUPTS 0
extern unsigned char acpi_kbd_controller_present;
extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode); extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
extern int pckbd_getkeycode(unsigned int scancode); extern int pckbd_getkeycode(unsigned int scancode);
extern int pckbd_pretranslate(unsigned char scancode, char raw_mode); extern int pckbd_pretranslate(unsigned char scancode, char raw_mode);
...@@ -26,6 +27,7 @@ extern void pckbd_leds(unsigned char leds); ...@@ -26,6 +27,7 @@ extern void pckbd_leds(unsigned char leds);
extern void pckbd_init_hw(void); extern void pckbd_init_hw(void);
extern unsigned char pckbd_sysrq_xlate[128]; extern unsigned char pckbd_sysrq_xlate[128];
#define kbd_controller_present() acpi_kbd_controller_present
#define kbd_setkeycode pckbd_setkeycode #define kbd_setkeycode pckbd_setkeycode
#define kbd_getkeycode pckbd_getkeycode #define kbd_getkeycode pckbd_getkeycode
#define kbd_pretranslate pckbd_pretranslate #define kbd_pretranslate pckbd_pretranslate
......
...@@ -64,6 +64,15 @@ ...@@ -64,6 +64,15 @@
#define IA64_PSR_RI_BIT 41 #define IA64_PSR_RI_BIT 41
#define IA64_PSR_ED_BIT 43 #define IA64_PSR_ED_BIT 43
#define IA64_PSR_BN_BIT 44 #define IA64_PSR_BN_BIT 44
#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
execve(). */
#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH)
#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT) #define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT) #define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
......
...@@ -210,6 +210,7 @@ extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg; ...@@ -210,6 +210,7 @@ extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
extern ia64_mv_pci_dma_sync_single swiotlb_sync_single; extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg; extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
extern ia64_mv_pci_dma_address swiotlb_dma_address; extern ia64_mv_pci_dma_address swiotlb_dma_address;
extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
/* /*
* Define default versions so we can extend machvec for new platforms without having * Define default versions so we can extend machvec for new platforms without having
......
...@@ -16,6 +16,7 @@ extern ia64_mv_inl_t __ia64_inl; ...@@ -16,6 +16,7 @@ extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb; extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw; extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl; extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiob_t __ia64_mmiob;
#define MACHVEC_HELPER(name) \ #define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
......
...@@ -2,8 +2,8 @@ ...@@ -2,8 +2,8 @@
#define _ASM_IA64_MMU_CONTEXT_H #define _ASM_IA64_MMU_CONTEXT_H
/* /*
* Copyright (C) 1998-2001 Hewlett-Packard Co * Copyright (C) 1998-2002 Hewlett-Packard Co
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
/* /*
...@@ -13,8 +13,6 @@ ...@@ -13,8 +13,6 @@
* consider the region number when performing a TLB lookup, we need to assign a unique * consider the region number when performing a TLB lookup, we need to assign a unique
* region id to each region in a process. We use the least significant three bits in a * region id to each region in a process. We use the least significant three bits in a
* region id for this purpose. * region id for this purpose.
*
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */ #define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */
...@@ -23,6 +21,8 @@ ...@@ -23,6 +21,8 @@
# ifndef __ASSEMBLY__ # ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -36,6 +36,7 @@ struct ia64_ctx { ...@@ -36,6 +36,7 @@ struct ia64_ctx {
}; };
extern struct ia64_ctx ia64_ctx; extern struct ia64_ctx ia64_ctx;
extern u8 ia64_need_tlb_flush __per_cpu_data;
extern void wrap_mmu_context (struct mm_struct *mm); extern void wrap_mmu_context (struct mm_struct *mm);
...@@ -44,6 +45,23 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) ...@@ -44,6 +45,23 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{ {
} }
/*
* When the context counter wraps around all TLBs need to be flushed because an old
* context number might have been reused. This is signalled by the ia64_need_tlb_flush
* per-CPU variable, which is checked in the routine below. Called by activate_mm().
* <efocht@ess.nec.de>
*/
static inline void
delayed_tlb_flush (void)
{
extern void __flush_tlb_all (void);
if (unlikely(ia64_need_tlb_flush)) {
__flush_tlb_all();
ia64_need_tlb_flush = 0;
}
}
static inline void static inline void
get_new_mmu_context (struct mm_struct *mm) get_new_mmu_context (struct mm_struct *mm)
{ {
...@@ -54,7 +72,6 @@ get_new_mmu_context (struct mm_struct *mm) ...@@ -54,7 +72,6 @@ get_new_mmu_context (struct mm_struct *mm)
mm->context = ia64_ctx.next++; mm->context = ia64_ctx.next++;
} }
spin_unlock(&ia64_ctx.lock); spin_unlock(&ia64_ctx.lock);
} }
static inline void static inline void
...@@ -109,6 +126,8 @@ reload_context (struct mm_struct *mm) ...@@ -109,6 +126,8 @@ reload_context (struct mm_struct *mm)
static inline void static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next) activate_mm (struct mm_struct *prev, struct mm_struct *next)
{ {
delayed_tlb_flush();
/* /*
* We may get interrupts here, but that's OK because interrupt * We may get interrupts here, but that's OK because interrupt
* handlers cannot touch user-space. * handlers cannot touch user-space.
......
#ifndef _ASM_IA64_OFFSETS_H
#define _ASM_IA64_OFFSETS_H
/*
* DO NOT MODIFY
*
* This file was generated by arch/ia64/tools/print_offsets.awk.
*
*/
#define IA64_TASK_SIZE 3952 /* 0xf70 */
#define IA64_THREAD_INFO_SIZE 32 /* 0x20 */
#define IA64_PT_REGS_SIZE 400 /* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560 /* 0x230 */
#define IA64_SIGINFO_SIZE 128 /* 0x80 */
#define IA64_CPU_SIZE 224 /* 0xe0 */
#define SIGFRAME_SIZE 2816 /* 0xb00 */
#define UNW_FRAME_INFO_SIZE 448 /* 0x1c0 */
#define IA64_TASK_THREAD_KSP_OFFSET 1496 /* 0x5d8 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */
#define IA64_PT_REGS_CR_IIP_OFFSET 8 /* 0x8 */
#define IA64_PT_REGS_CR_IFS_OFFSET 16 /* 0x10 */
#define IA64_PT_REGS_AR_UNAT_OFFSET 24 /* 0x18 */
#define IA64_PT_REGS_AR_PFS_OFFSET 32 /* 0x20 */
#define IA64_PT_REGS_AR_RSC_OFFSET 40 /* 0x28 */
#define IA64_PT_REGS_AR_RNAT_OFFSET 48 /* 0x30 */
#define IA64_PT_REGS_AR_BSPSTORE_OFFSET 56 /* 0x38 */
#define IA64_PT_REGS_PR_OFFSET 64 /* 0x40 */
#define IA64_PT_REGS_B6_OFFSET 72 /* 0x48 */
#define IA64_PT_REGS_LOADRS_OFFSET 80 /* 0x50 */
#define IA64_PT_REGS_R1_OFFSET 88 /* 0x58 */
#define IA64_PT_REGS_R2_OFFSET 96 /* 0x60 */
#define IA64_PT_REGS_R3_OFFSET 104 /* 0x68 */
#define IA64_PT_REGS_R12_OFFSET 112 /* 0x70 */
#define IA64_PT_REGS_R13_OFFSET 120 /* 0x78 */
#define IA64_PT_REGS_R14_OFFSET 128 /* 0x80 */
#define IA64_PT_REGS_R15_OFFSET 136 /* 0x88 */
#define IA64_PT_REGS_R8_OFFSET 144 /* 0x90 */
#define IA64_PT_REGS_R9_OFFSET 152 /* 0x98 */
#define IA64_PT_REGS_R10_OFFSET 160 /* 0xa0 */
#define IA64_PT_REGS_R11_OFFSET 168 /* 0xa8 */
#define IA64_PT_REGS_R16_OFFSET 176 /* 0xb0 */
#define IA64_PT_REGS_R17_OFFSET 184 /* 0xb8 */
#define IA64_PT_REGS_R18_OFFSET 192 /* 0xc0 */
#define IA64_PT_REGS_R19_OFFSET 200 /* 0xc8 */
#define IA64_PT_REGS_R20_OFFSET 208 /* 0xd0 */
#define IA64_PT_REGS_R21_OFFSET 216 /* 0xd8 */
#define IA64_PT_REGS_R22_OFFSET 224 /* 0xe0 */
#define IA64_PT_REGS_R23_OFFSET 232 /* 0xe8 */
#define IA64_PT_REGS_R24_OFFSET 240 /* 0xf0 */
#define IA64_PT_REGS_R25_OFFSET 248 /* 0xf8 */
#define IA64_PT_REGS_R26_OFFSET 256 /* 0x100 */
#define IA64_PT_REGS_R27_OFFSET 264 /* 0x108 */
#define IA64_PT_REGS_R28_OFFSET 272 /* 0x110 */
#define IA64_PT_REGS_R29_OFFSET 280 /* 0x118 */
#define IA64_PT_REGS_R30_OFFSET 288 /* 0x120 */
#define IA64_PT_REGS_R31_OFFSET 296 /* 0x128 */
#define IA64_PT_REGS_AR_CCV_OFFSET 304 /* 0x130 */
#define IA64_PT_REGS_AR_FPSR_OFFSET 312 /* 0x138 */
#define IA64_PT_REGS_B0_OFFSET 320 /* 0x140 */
#define IA64_PT_REGS_B7_OFFSET 328 /* 0x148 */
#define IA64_PT_REGS_F6_OFFSET 336 /* 0x150 */
#define IA64_PT_REGS_F7_OFFSET 352 /* 0x160 */
#define IA64_PT_REGS_F8_OFFSET 368 /* 0x170 */
#define IA64_PT_REGS_F9_OFFSET 384 /* 0x180 */
#define IA64_SWITCH_STACK_CALLER_UNAT_OFFSET 0 /* 0x0 */
#define IA64_SWITCH_STACK_AR_FPSR_OFFSET 8 /* 0x8 */
#define IA64_SWITCH_STACK_F2_OFFSET 16 /* 0x10 */
#define IA64_SWITCH_STACK_F3_OFFSET 32 /* 0x20 */
#define IA64_SWITCH_STACK_F4_OFFSET 48 /* 0x30 */
#define IA64_SWITCH_STACK_F5_OFFSET 64 /* 0x40 */
#define IA64_SWITCH_STACK_F10_OFFSET 80 /* 0x50 */
#define IA64_SWITCH_STACK_F11_OFFSET 96 /* 0x60 */
#define IA64_SWITCH_STACK_F12_OFFSET 112 /* 0x70 */
#define IA64_SWITCH_STACK_F13_OFFSET 128 /* 0x80 */
#define IA64_SWITCH_STACK_F14_OFFSET 144 /* 0x90 */
#define IA64_SWITCH_STACK_F15_OFFSET 160 /* 0xa0 */
#define IA64_SWITCH_STACK_F16_OFFSET 176 /* 0xb0 */
#define IA64_SWITCH_STACK_F17_OFFSET 192 /* 0xc0 */
#define IA64_SWITCH_STACK_F18_OFFSET 208 /* 0xd0 */
#define IA64_SWITCH_STACK_F19_OFFSET 224 /* 0xe0 */
#define IA64_SWITCH_STACK_F20_OFFSET 240 /* 0xf0 */
#define IA64_SWITCH_STACK_F21_OFFSET 256 /* 0x100 */
#define IA64_SWITCH_STACK_F22_OFFSET 272 /* 0x110 */
#define IA64_SWITCH_STACK_F23_OFFSET 288 /* 0x120 */
#define IA64_SWITCH_STACK_F24_OFFSET 304 /* 0x130 */
#define IA64_SWITCH_STACK_F25_OFFSET 320 /* 0x140 */
#define IA64_SWITCH_STACK_F26_OFFSET 336 /* 0x150 */
#define IA64_SWITCH_STACK_F27_OFFSET 352 /* 0x160 */
#define IA64_SWITCH_STACK_F28_OFFSET 368 /* 0x170 */
#define IA64_SWITCH_STACK_F29_OFFSET 384 /* 0x180 */
#define IA64_SWITCH_STACK_F30_OFFSET 400 /* 0x190 */
#define IA64_SWITCH_STACK_F31_OFFSET 416 /* 0x1a0 */
#define IA64_SWITCH_STACK_R4_OFFSET 432 /* 0x1b0 */
#define IA64_SWITCH_STACK_R5_OFFSET 440 /* 0x1b8 */
#define IA64_SWITCH_STACK_R6_OFFSET 448 /* 0x1c0 */
#define IA64_SWITCH_STACK_R7_OFFSET 456 /* 0x1c8 */
#define IA64_SWITCH_STACK_B0_OFFSET 464 /* 0x1d0 */
#define IA64_SWITCH_STACK_B1_OFFSET 472 /* 0x1d8 */
#define IA64_SWITCH_STACK_B2_OFFSET 480 /* 0x1e0 */
#define IA64_SWITCH_STACK_B3_OFFSET 488 /* 0x1e8 */
#define IA64_SWITCH_STACK_B4_OFFSET 496 /* 0x1f0 */
#define IA64_SWITCH_STACK_B5_OFFSET 504 /* 0x1f8 */
#define IA64_SWITCH_STACK_AR_PFS_OFFSET 512 /* 0x200 */
#define IA64_SWITCH_STACK_AR_LC_OFFSET 520 /* 0x208 */
#define IA64_SWITCH_STACK_AR_UNAT_OFFSET 528 /* 0x210 */
#define IA64_SWITCH_STACK_AR_RNAT_OFFSET 536 /* 0x218 */
#define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544 /* 0x220 */
#define IA64_SWITCH_STACK_PR_OFFSET 552 /* 0x228 */
#define IA64_SIGCONTEXT_IP_OFFSET 40 /* 0x28 */
#define IA64_SIGCONTEXT_AR_BSP_OFFSET 72 /* 0x48 */
#define IA64_SIGCONTEXT_AR_FPSR_OFFSET 104 /* 0x68 */
#define IA64_SIGCONTEXT_AR_RNAT_OFFSET 80 /* 0x50 */
#define IA64_SIGCONTEXT_AR_UNAT_OFFSET 96 /* 0x60 */
#define IA64_SIGCONTEXT_B0_OFFSET 136 /* 0x88 */
#define IA64_SIGCONTEXT_CFM_OFFSET 48 /* 0x30 */
#define IA64_SIGCONTEXT_FLAGS_OFFSET 0 /* 0x0 */
#define IA64_SIGCONTEXT_FR6_OFFSET 560 /* 0x230 */
#define IA64_SIGCONTEXT_PR_OFFSET 128 /* 0x80 */
#define IA64_SIGCONTEXT_R12_OFFSET 296 /* 0x128 */
#define IA64_SIGCONTEXT_RBS_BASE_OFFSET 2512 /* 0x9d0 */
#define IA64_SIGCONTEXT_LOADRS_OFFSET 2520 /* 0x9d8 */
#define IA64_SIGFRAME_ARG0_OFFSET 0 /* 0x0 */
#define IA64_SIGFRAME_ARG1_OFFSET 8 /* 0x8 */
#define IA64_SIGFRAME_ARG2_OFFSET 16 /* 0x10 */
#define IA64_SIGFRAME_HANDLER_OFFSET 24 /* 0x18 */
#define IA64_SIGFRAME_SIGCONTEXT_OFFSET 160 /* 0xa0 */
#define IA64_CLONE_VFORK 16384 /* 0x4000 */
#define IA64_CLONE_VM 256 /* 0x100 */
#endif /* _ASM_IA64_OFFSETS_H */
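(Aside: offsets.h exists so assembly sources can address C structure fields by numeric offset; the generator compiles a C file and prints offsetof() values. A minimal sketch of the idea with a stand-in layout — the real tool is arch/ia64/tools/print_offsets and the real structures live in asm/ptrace.h and friends.)

#include <stddef.h>
#include <stdio.h>

/* stand-in layout, illustration only — not the real struct pt_regs */
struct pt_regs_example {
	unsigned long cr_ipsr;
	unsigned long cr_iip;
	unsigned long cr_ifs;
};

int main(void)
{
	printf("#define IA64_PT_REGS_CR_IIP_OFFSET %lu\n",
	       (unsigned long) offsetof(struct pt_regs_example, cr_iip));
	return 0;
}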
...@@ -90,7 +90,7 @@ pcibios_penalize_isa_irq (int irq) ...@@ -90,7 +90,7 @@ pcibios_penalize_isa_irq (int irq)
/* Return the index of the PCI controller for device PDEV. */ /* Return the index of the PCI controller for device PDEV. */
#define pci_controller_num(PDEV) (0) #define pci_controller_num(PDEV) (0)
#define sg_dma_len(sg) ((sg)->length) #define sg_dma_len(sg) ((sg)->dma_length)
#define HAVE_PCI_MMAP #define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
......
...@@ -172,9 +172,8 @@ extern int pfm_use_debug_registers(struct task_struct *); ...@@ -172,9 +172,8 @@ extern int pfm_use_debug_registers(struct task_struct *);
extern int pfm_release_debug_registers(struct task_struct *); extern int pfm_release_debug_registers(struct task_struct *);
extern int pfm_cleanup_smpl_buf(struct task_struct *); extern int pfm_cleanup_smpl_buf(struct task_struct *);
extern void pfm_syst_wide_update_task(struct task_struct *, int); extern void pfm_syst_wide_update_task(struct task_struct *, int);
extern void pfm_ovfl_block_reset (void); extern void pfm_ovfl_block_reset(void);
extern void perfmon_init_percpu(void);
extern int pfm_syst_wide;
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -270,12 +270,8 @@ struct thread_struct { ...@@ -270,12 +270,8 @@ struct thread_struct {
#define start_thread(regs,new_ip,new_sp) do { \ #define start_thread(regs,new_ip,new_sp) do { \
set_fs(USER_DS); \ set_fs(USER_DS); \
ia64_psr(regs)->dfh = 1; /* disable fph */ \ regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP)) \
ia64_psr(regs)->mfh = 0; /* clear mfh */ \ & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
ia64_psr(regs)->cpl = 3; /* set user mode */ \
ia64_psr(regs)->ri = 0; /* clear return slot number */ \
ia64_psr(regs)->is = 0; /* IA-64 instruction set */ \
ia64_psr(regs)->sp = 1; /* enforce secure perfmon */ \
regs->cr_iip = new_ip; \ regs->cr_iip = new_ip; \
regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \ regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
regs->ar_rnat = 0; \ regs->ar_rnat = 0; \
...@@ -284,7 +280,7 @@ struct thread_struct { ...@@ -284,7 +280,7 @@ struct thread_struct {
regs->loadrs = 0; \ regs->loadrs = 0; \
regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \ regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
if (!likely (current->mm->dumpable)) { \ if (unlikely(!current->mm->dumpable)) { \
/* \ /* \
* Zap scratch regs to avoid leaking bits between processes with different \ * Zap scratch regs to avoid leaking bits between processes with different \
* uid/privileges. \ * uid/privileges. \
......
...@@ -7,12 +7,12 @@ ...@@ -7,12 +7,12 @@
*/ */
struct scatterlist { struct scatterlist {
char *orig_address; /* for use by swiotlb */
/* These two are only valid if ADDRESS member of this struct is NULL. */
struct page *page; struct page *page;
unsigned int offset; unsigned int offset;
unsigned int length; /* buffer length */ unsigned int length; /* buffer length */
dma_addr_t dma_address;
unsigned int dma_length;
}; };
#define ISA_DMA_THRESHOLD (~0UL) #define ISA_DMA_THRESHOLD (~0UL)
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#include <asm/hardirq.h> #include <asm/hardirq.h>
#include <linux/compiler.h>
#define __local_bh_enable() do { barrier(); really_local_bh_count()--; } while (0) #define __local_bh_enable() do { barrier(); really_local_bh_count()--; } while (0)
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com> * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/percpu.h>
#include <asm/kregs.h> #include <asm/kregs.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -384,7 +385,8 @@ extern void ia64_save_extra (struct task_struct *task); ...@@ -384,7 +385,8 @@ extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task); extern void ia64_load_extra (struct task_struct *task);
#if defined(CONFIG_SMP) && defined(CONFIG_PERFMON) #if defined(CONFIG_SMP) && defined(CONFIG_PERFMON)
# define PERFMON_IS_SYSWIDE() (local_cpu_data->pfm_syst_wide != 0) extern int __per_cpu_data pfm_syst_wide;
# define PERFMON_IS_SYSWIDE() (this_cpu(pfm_syst_wide) != 0)
#else #else
# define PERFMON_IS_SYSWIDE() (0) # define PERFMON_IS_SYSWIDE() (0)
#endif #endif
......
/* XXX fix me! */ #ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* This file was derived from asm-generic/tlb.h.
*/
/*
* Removing a translation from a page table (including TLB-shootdown) is a four-step
* procedure:
*
* (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
* (this is a no-op on ia64).
* (2) Clear the relevant portions of the page-table
* (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
* (4) Release the pages that were freed up in step (2).
*
* Note that the ordering of these steps is crucial to avoid races on MP machines.
*
* The Linux kernel defines several platform-specific hooks for TLB-shootdown. When
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
* tlb <- tlb_gather_mmu(mm); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
* for each page-table-entry PTE that needs to be removed do {
* tlb_remove_tlb_entry(tlb, pte, address);
* if (pte refers to a normal page) {
* tlb_remove_page(tlb, page);
* }
* }
* tlb_end_vma(tlb, vma);
* }
* }
* tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
*/
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_SMP
# define FREE_PTE_NR 2048
# define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
#else
# define FREE_PTE_NR 0
# define tlb_fast_mode(tlb) (1)
#endif
typedef struct {
struct mm_struct *mm;
unsigned long nr; /* == ~0UL => fast mode */
unsigned long freed; /* number of pages freed */
unsigned long start_addr;
unsigned long end_addr;
struct page *pages[FREE_PTE_NR];
} mmu_gather_t;
/* Users of the generic TLB shootdown code must declare this storage space. */
extern mmu_gather_t mmu_gathers[NR_CPUS];
/*
* Flush the TLB for address range START to END and, if not in fast mode, release the
* freed pages that were gathered up to this point.
*/
static inline void
ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
unsigned long nr;
if (unlikely (end - start >= 1024*1024*1024*1024UL
|| rgn_index(start) != rgn_index(end - 1)))
{
/*
* If we flush more than a tera-byte or across regions, we're probably
* better off just flushing the entire TLB(s). This should be very rare
* and is not worth optimizing for.
*/
flush_tlb_all();
} else {
/*
* XXX fix me: flush_tlb_range() should take an mm pointer instead of a
* vma pointer.
*/
struct vm_area_struct vma;
vma.vm_mm = tlb->mm;
/* flush the address range from the tlb: */
flush_tlb_range(&vma, start, end);
/* now flush the virt. page-table area mapping the address range: */
flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
}
/* lastly, release the freed pages */
nr = tlb->nr;
if (!tlb_fast_mode(tlb)) {
unsigned long i;
tlb->nr = 0;
tlb->start_addr = ~0UL;
for (i = 0; i < nr; ++i)
free_page_and_swap_cache(tlb->pages[i]);
}
}
/*
* Return a pointer to an initialized mmu_gather_t.
*/
static inline mmu_gather_t *
tlb_gather_mmu (struct mm_struct *mm)
{
mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
tlb->mm = mm;
tlb->freed = 0;
tlb->start_addr = ~0UL;
/* Use fast mode if only one CPU is online */
tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL;
return tlb;
}
/*
* Called at the end of the shootdown operation to free up any resources that were
* collected. The page table lock is still held at this point.
*/
static inline void
tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
unsigned long freed = tlb->freed;
struct mm_struct *mm = tlb->mm;
unsigned long rss = mm->rss;
if (rss < freed)
freed = rss;
mm->rss = rss - freed;
/*
* Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
* tlb->end_addr.
*/
ia64_tlb_flush_mmu(tlb, start, end);
/* keep the page table cache within bounds */
check_pgt_cache();
}
/*
* Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
* PTE, not just those pointing to (normal) physical memory.
*/
static inline void
tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t pte, unsigned long address)
{
if (tlb->start_addr == ~0UL)
tlb->start_addr = address;
tlb->end_addr = address + PAGE_SIZE;
}
/*
* Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
* must be delayed until after the TLB has been flushed (see comments at the beginning of
* this file).
*/
static inline void
tlb_remove_page (mmu_gather_t *tlb, struct page *page)
{
if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
return;
}
tlb->pages[tlb->nr++] = page;
if (tlb->nr >= FREE_PTE_NR)
ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
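(Tying the hooks together: the generic unmap path calls them in the order given by the template in the header comment above. A condensed, hedged sketch with the page-table walk flattened to an array of PTE pointers to keep it short; shootdown_sketch and its arguments are invented, the real callers live in mm/memory.c.)

#include <asm/tlb.h>

static void
shootdown_sketch (struct mm_struct *mm, pte_t **ptep, unsigned long start, unsigned long npages)
{
	mmu_gather_t *tlb = tlb_gather_mmu(mm);		/* begin the gather */
	unsigned long i, addr = start;

	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		pte_t pte = ptep_get_and_clear(ptep[i]);	/* step (2): clear PTE */
		tlb_remove_tlb_entry(tlb, pte, addr);		/* record the range */
		if (pte_present(pte))
			tlb_remove_page(tlb, pte_page(pte));	/* step (4), deferred */
	}
	tlb_finish_mmu(tlb, start, addr);	/* step (3): flush, then free */
}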
#define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h> #endif /* _ASM_IA64_TLB_H */
...@@ -70,12 +70,10 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) ...@@ -70,12 +70,10 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
static inline void static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end) flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{ {
struct vm_area_struct vma; /*
* Deprecated. The virtual page table is now flushed via the normal gather/flush
if (REGION_NUMBER(start) != REGION_NUMBER(end)) * interface (see tlb.h).
printk("flush_tlb_pgtables: can't flush across regions!!\n"); */
vma.vm_mm = mm;
flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
} }
#define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ #define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */
......
...@@ -223,6 +223,10 @@ ...@@ -223,6 +223,10 @@
#define __NR_sched_setaffinity 1231 #define __NR_sched_setaffinity 1231
#define __NR_sched_getaffinity 1232 #define __NR_sched_getaffinity 1232
#define __NR_security 1233 #define __NR_security 1233
#define __NR_get_large_pages 1234
#define __NR_free_large_pages 1235
#define __NR_share_large_pages 1236
#define __NR_unshare_large_pages 1237
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
......