Commit 53405a4e authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5_ppc64

into samba.org:/scratch/anton/linux-2.5_work
parents 2ebd1fbd da5740e9
@@ -247,6 +247,8 @@ ChangeLog
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.0.25:
+	- Minor bugfixes in error code paths and small cleanups.
 2.0.24:
 	- Small internal cleanups.
 	- Support for sendfile system call. (Christoph Hellwig)
...
@@ -1005,9 +1005,14 @@ delay:
 	ret
 
 # Descriptor tables
+#
+# NOTE: if you think the GDT is large, you can make it smaller by just
+# defining the KERNEL_CS and KERNEL_DS entries and shifting the gdt
+# address down by GDT_ENTRY_KERNEL_CS*8. This puts bogus entries into
+# the GDT, but those wont be used so it's not a problem.
+#
 gdt:
-	.word	0, 0, 0, 0		# dummy
-	.word	0, 0, 0, 0		# unused
+	.fill GDT_ENTRY_KERNEL_CS,8,0
 
 	.word	0xFFFF			# 4Gb - (0x100000*0x1000 = 4Gb)
 	.word	0			# base address = 0
...
@@ -423,6 +423,7 @@ void __init cpu_init (void)
 {
 	int cpu = smp_processor_id();
 	struct tss_struct * t = init_tss + cpu;
+	struct thread_struct *thread = &current->thread;
 
 	if (test_and_set_bit(cpu, &cpu_initialized)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
@@ -447,9 +448,13 @@ void __init cpu_init (void)
 	 */
 	if (cpu) {
 		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
-		cpu_gdt_descr[cpu].size = GDT_SIZE;
+		cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
 		cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
 	}
+	/*
+	 * Set up the per-thread TLS descriptor cache:
+	 */
+	memcpy(thread->tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_MAX * 8);
 
 	__asm__ __volatile__("lgdt %0": "=m" (cpu_gdt_descr[cpu]));
 	__asm__ __volatile__("lidt %0": "=m" (idt_descr));
@@ -468,9 +473,9 @@ void __init cpu_init (void)
 		BUG();
 	enter_lazy_tlb(&init_mm, current, cpu);
 
-	t->esp0 = current->thread.esp0;
+	t->esp0 = thread->esp0;
 	set_tss_desc(cpu,t);
-	cpu_gdt_table[cpu][TSS_ENTRY].b &= 0xfffffdff;
+	cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
 	load_TR_desc();
 	load_LDT(&init_mm.context);
...
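The new tls_array field caches one descriptor per TLS slot in the thread struct; at context-switch time load_TLS() (see the __switch_to hunk below) copies that cache into the current CPU's GDT. A rough stand-alone sketch of that operation, assuming the GDT_ENTRY_TLS_MIN / GDT_ENTRY_TLS_ENTRIES layout used in this patch; the in-tree load_TLS() is a macro and may differ in detail:

	/* Copy the incoming thread's cached TLS descriptors into this CPU's GDT,
	 * so the fixed TLS selectors resolve per-thread after the switch. */
	static inline void load_tls_sketch(struct thread_struct *t, unsigned int cpu)
	{
		unsigned int i;

		for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
			cpu_gdt_table[cpu][GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
	}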
@@ -753,6 +753,7 @@ ENTRY(sys_call_table)
 	.long sys_sched_setaffinity
 	.long sys_sched_getaffinity
 	.long sys_set_thread_area
+	.long sys_get_thread_area
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long sys_ni_syscall
...
@@ -239,12 +239,7 @@ is386:	movl $2,%ecx		# set MP
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
-#ifdef CONFIG_SMP
-	movl $(__KERNEL_DS), %eax
-	movl %eax,%ss		# Reload the stack pointer (segment only)
-#else
-	lss stack_start,%esp	# Load processor stack
-#endif
+	movl %eax,%ss
 	xorl %eax,%eax
 	lldt %ax
 	cld			# gcc2 wants the direction flag cleared at all times
@@ -412,34 +407,40 @@ ENTRY(_stext)
 	ALIGN
 /*
- * The Global Descriptor Table contains 20 quadwords, per-CPU.
+ * The Global Descriptor Table contains 28 quadwords, per-CPU.
  */
 ENTRY(cpu_gdt_table)
 	.quad 0x0000000000000000	/* NULL descriptor */
-	.quad 0x0000000000000000	/* TLS descriptor */
-	.quad 0x00cf9a000000ffff	/* 0x10 kernel 4GB code at 0x00000000 */
-	.quad 0x00cf92000000ffff	/* 0x18 kernel 4GB data at 0x00000000 */
+	.quad 0x0000000000000000	/* 0x0b reserved */
+	.quad 0x0000000000000000	/* 0x13 reserved */
+	.quad 0x0000000000000000	/* 0x1b reserved */
 	.quad 0x00cffa000000ffff	/* 0x23 user 4GB code at 0x00000000 */
 	.quad 0x00cff2000000ffff	/* 0x2b user 4GB data at 0x00000000 */
-	.quad 0x0000000000000000	/* TSS descriptor */
-	.quad 0x0000000000000000	/* LDT descriptor */
+	.quad 0x0000000000000000	/* 0x33 TLS entry 1 */
+	.quad 0x0000000000000000	/* 0x3b TLS entry 2 */
+	.quad 0x0000000000000000	/* 0x43 TLS entry 3 */
+	.quad 0x0000000000000000	/* 0x4b reserved */
+	.quad 0x0000000000000000	/* 0x53 reserved */
+	.quad 0x0000000000000000	/* 0x5b reserved */
+	.quad 0x00cf9a000000ffff	/* 0x60 kernel 4GB code at 0x00000000 */
+	.quad 0x00cf92000000ffff	/* 0x68 kernel 4GB data at 0x00000000 */
+	.quad 0x0000000000000000	/* 0x70 TSS descriptor */
+	.quad 0x0000000000000000	/* 0x78 LDT descriptor */
+
+	/* Segments used for calling PnP BIOS */
+	.quad 0x00c09a0000000000	/* 0x80 32-bit code */
+	.quad 0x00809a0000000000	/* 0x88 16-bit code */
+	.quad 0x0080920000000000	/* 0x90 16-bit data */
+	.quad 0x0080920000000000	/* 0x98 16-bit data */
+	.quad 0x0080920000000000	/* 0xa0 16-bit data */
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * and limits are set at run time.
 	 */
-	.quad 0x0040920000000000	/* 0x40 APM set up for bad BIOS's */
-	.quad 0x00409a0000000000	/* 0x48 APM CS    code */
-	.quad 0x00009a0000000000	/* 0x50 APM CS 16 code (16 bit) */
-	.quad 0x0040920000000000	/* 0x58 APM DS    data */
-	/* Segments used for calling PnP BIOS */
-	.quad 0x00c09a0000000000	/* 0x60 32-bit code */
-	.quad 0x00809a0000000000	/* 0x68 16-bit code */
-	.quad 0x0080920000000000	/* 0x70 16-bit data */
-	.quad 0x0080920000000000	/* 0x78 16-bit data */
-	.quad 0x0080920000000000	/* 0x80 16-bit data */
-	.quad 0x0000000000000000	/* 0x88 not used */
-	.quad 0x0000000000000000	/* 0x90 not used */
-	.quad 0x0000000000000000	/* 0x98 not used */
+	.quad 0x00409a0000000000	/* 0xa8 APM CS    code */
+	.quad 0x00009a0000000000	/* 0xb0 APM CS 16 code (16 bit) */
+	.quad 0x0040920000000000	/* 0xb8 APM DS    data */
 
 #if CONFIG_SMP
 	.fill (NR_CPUS-1)*GDT_ENTRIES,8,0	/* other CPU's GDT */
...
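For reference when reading the descriptor constants above: each .quad packs a segment's base, limit and access bits into two 32-bit words. A small stand-alone decode sketch, mirroring the GET_BASE()/GET_LIMIT() macros added to process.c later in this patch (0x00cf9a000000ffff, for example, decodes to base 0, a page-granular 0xfffff limit, i.e. 4GB, and a present ring-0 code segment):

	struct gdt_words { unsigned int a, b; };	/* low word, high word */

	static unsigned int desc_base(const struct gdt_words *d)
	{
		/* base[15:0] lives in a[31:16], base[23:16] in b[7:0], base[31:24] in b[31:24] */
		return ((d->a >> 16) & 0x0000ffff) |
		       ((d->b << 16) & 0x00ff0000) |
		       ( d->b        & 0xff000000);
	}

	static unsigned int desc_limit(const struct gdt_words *d)
	{
		/* limit[15:0] lives in a[15:0], limit[19:16] in b[19:16] */
		return (d->a & 0x0ffff) | (d->b & 0xf0000);
	}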
@@ -170,7 +170,7 @@ static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
 	struct mm_struct * mm = current->mm;
 	__u32 entry_1, entry_2, *lp;
 	int error;
-	struct modify_ldt_ldt_s ldt_info;
+	struct user_desc ldt_info;
 
 	error = -EINVAL;
 	if (bytecount != sizeof(ldt_info))
@@ -200,32 +200,17 @@ static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
 	/* Allow LDTs to be cleared by the user. */
 	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-		if (oldmode ||
-		    (ldt_info.contents == 0		&&
-		     ldt_info.read_exec_only == 1	&&
-		     ldt_info.seg_32bit == 0		&&
-		     ldt_info.limit_in_pages == 0	&&
-		     ldt_info.seg_not_present == 1	&&
-		     ldt_info.useable == 0 )) {
+		if (oldmode || LDT_empty(&ldt_info)) {
 			entry_1 = 0;
 			entry_2 = 0;
 			goto install;
 		}
 	}
 
-	entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
-		  (ldt_info.limit & 0x0ffff);
-	entry_2 = (ldt_info.base_addr & 0xff000000) |
-		  ((ldt_info.base_addr & 0x00ff0000) >> 16) |
-		  (ldt_info.limit & 0xf0000) |
-		  ((ldt_info.read_exec_only ^ 1) << 9) |
-		  (ldt_info.contents << 10) |
-		  ((ldt_info.seg_not_present ^ 1) << 15) |
-		  (ldt_info.seg_32bit << 22) |
-		  (ldt_info.limit_in_pages << 23) |
-		  0x7000;
-	if (!oldmode)
-		entry_2 |= (ldt_info.useable << 20);
+	entry_1 = LDT_entry_a(&ldt_info);
+	entry_2 = LDT_entry_b(&ldt_info);
+	if (oldmode)
+		entry_2 &= ~(1 << 20);
 
 	/* Install the new entry ...  */
 install:
...
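The LDT_entry_a()/LDT_entry_b() helpers themselves are not visible in this hunk; judging from the open-coded packing they replace above, they presumably expand to something close to the following (note that useable is now always packed, and the old-mode path strips it again with the entry_2 &= ~(1 << 20) line):

	#define LDT_entry_a(info) \
		((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

	#define LDT_entry_b(info) \
		(((info)->base_addr & 0xff000000) | \
		(((info)->base_addr & 0x00ff0000) >> 16) | \
		((info)->limit & 0xf0000) | \
		(((info)->read_exec_only ^ 1) << 9) | \
		((info)->contents << 10) | \
		(((info)->seg_not_present ^ 1) << 15) | \
		((info)->seg_32bit << 22) | \
		((info)->limit_in_pages << 23) | \
		((info)->useable << 20) | \
		0x7000)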
@@ -681,11 +681,8 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
-	 *
-	 * NOTE: it's faster to do the two stores unconditionally
-	 * than to branch away.
 	 */
-	load_TLS_desc(next, cpu);
+	load_TLS(next, cpu);
 
 	/*
 	 * Save away %fs and %gs. No need to save %es and %ds, as
@@ -834,35 +831,114 @@ unsigned long get_wchan(struct task_struct *p)
 #undef first_sched
 
 /*
- * Set the Thread-Local Storage area:
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
  */
-asmlinkage int sys_set_thread_area(unsigned long base, unsigned long flags)
+static int get_free_idx(void)
 {
 	struct thread_struct *t = &current->thread;
-	int writable = 0;
-	int cpu;
+	int idx;
 
-	/* do not allow unused flags */
-	if (flags & ~TLS_FLAGS_MASK)
+	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+		if (desc_empty(t->tls_array + idx))
+			return idx + GDT_ENTRY_TLS_MIN;
+	return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int sys_set_thread_area(struct user_desc *u_info)
+{
+	struct thread_struct *t = &current->thread;
+	struct user_desc info;
+	struct desc_struct *desc;
+	int cpu, idx;
+
+	if (copy_from_user(&info, u_info, sizeof(info)))
+		return -EFAULT;
+	idx = info.entry_number;
+
+	/*
+	 * index -1 means the kernel should try to find and
+	 * allocate an empty descriptor:
+	 */
+	if (idx == -1) {
+		idx = get_free_idx();
+		if (idx < 0)
+			return idx;
+		if (put_user(idx, &u_info->entry_number))
+			return -EFAULT;
+	}
+
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	if (flags & TLS_FLAG_WRITABLE)
-		writable = 1;
+	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
 
 	/*
 	 * We must not get preempted while modifying the TLS.
 	 */
 	cpu = get_cpu();
 
-	t->tls_desc.a = ((base & 0x0000ffff) << 16) | 0xffff;
-	t->tls_desc.b = (base & 0xff000000) | ((base & 0x00ff0000) >> 16) |
-			0xf0000 | (writable << 9) | (1 << 15) |
-			(1 << 22) | (1 << 23) | 0x7000;
-	load_TLS_desc(t, cpu);
+	if (LDT_empty(&info)) {
+		desc->a = 0;
+		desc->b = 0;
+	} else {
+		desc->a = LDT_entry_a(&info);
+		desc->b = LDT_entry_b(&info);
+	}
+	load_TLS(t, cpu);
 
 	put_cpu();
 
-	return TLS_ENTRY*8 + 3;
+	return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) ( \
+	(((desc)->a >> 16) & 0x0000ffff) | \
+	(((desc)->b << 16) & 0x00ff0000) | \
+	( (desc)->b & 0xff000000) )
+
+#define GET_LIMIT(desc) ( \
+	((desc)->a & 0x0ffff) | \
+	 ((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc)		(((desc)->b >> 23) & 1)
+#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
+#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
+
+asmlinkage int sys_get_thread_area(struct user_desc *u_info)
+{
+	struct user_desc info;
+	struct desc_struct *desc;
+	int idx;
+
+	if (get_user(idx, &u_info->entry_number))
+		return -EFAULT;
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+	info.entry_number = idx;
+	info.base_addr = GET_BASE(desc);
+	info.limit = GET_LIMIT(desc);
+	info.seg_32bit = GET_32BIT(desc);
+	info.contents = GET_CONTENTS(desc);
+	info.read_exec_only = !GET_WRITABLE(desc);
+	info.limit_in_pages = GET_LIMIT_PAGES(desc);
+	info.seg_not_present = !GET_PRESENT(desc);
+	info.useable = GET_USEABLE(desc);
+
+	if (copy_to_user(u_info, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
 }
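A hypothetical userspace sketch of how the new entry_number == -1 convention is meant to be driven. The struct layout is inferred from the fields referenced above, and the __NR_set_thread_area macro is assumed to correspond to the sys_call_table slot touched by this patch; neither is shown verbatim in the diff:

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	struct user_desc {			/* inferred field layout */
		unsigned int entry_number;
		unsigned int base_addr;
		unsigned int limit;
		unsigned int seg_32bit:1;
		unsigned int contents:2;
		unsigned int read_exec_only:1;
		unsigned int limit_in_pages:1;
		unsigned int seg_not_present:1;
		unsigned int useable:1;
	};

	/* Ask the kernel for a free TLS slot pointing at "base"; returns the GDT
	 * entry number the kernel picked, or -1 on failure. */
	static int setup_tls_sketch(void *base, unsigned int limit_pages)
	{
		struct user_desc u;

		memset(&u, 0, sizeof(u));
		u.entry_number = -1;		/* -1: kernel allocates a free entry */
		u.base_addr = (unsigned long) base;
		u.limit = limit_pages;
		u.seg_32bit = 1;
		u.limit_in_pages = 1;
		u.useable = 1;

		if (syscall(__NR_set_thread_area, &u) != 0)
			return -1;
		return u.entry_number;		/* written back by the kernel */
	}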
@@ -207,7 +207,7 @@ void fix_processor_context(void)
 	struct tss_struct * t = init_tss + cpu;
 
 	set_tss_desc(cpu,t);	/* This just modifies memory; should not be neccessary. But... This is neccessary, because 386 hardware has concept of busy tsc or some similar stupidity. */
-	cpu_gdt_table[cpu][TSS_ENTRY].b &= 0xfffffdff;
+	cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
 	load_TR_desc();	/* This does ltr */
 	load_LDT(&current->mm->context);	/* This does lldt */
...
...@@ -13,7 +13,7 @@ AWK := awk ...@@ -13,7 +13,7 @@ AWK := awk
export AWK export AWK
OBJCOPYFLAGS := --strip-all OBJCOPYFLAGS := --strip-all
LDFLAGS_vmlinux := -static -T arch/$(ARCH)/vmlinux.lds LDFLAGS_vmlinux := -static -T arch/$(ARCH)/vmlinux.lds
AFLAGS_KERNEL := -mconstant-gp AFLAGS_KERNEL := -mconstant-gp
EXTRA = EXTRA =
...@@ -26,7 +26,7 @@ CFLAGS_KERNEL := -mconstant-gp ...@@ -26,7 +26,7 @@ CFLAGS_KERNEL := -mconstant-gp
GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.') GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
ifneq ($(GCC_VERSION),2) ifneq ($(GCC_VERSION),2)
CFLAGS += -frename-registers --param max-inline-insns=2000 CFLAGS += -frename-registers --param max-inline-insns=5000
endif endif
ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y) ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
......
@@ -12,8 +12,10 @@ LINKFLAGS = -static -T bootloader.lds
 OBJECTS = bootloader.o
 
 targets-$(CONFIG_IA64_HP_SIM) += bootloader
 targets-$(CONFIG_IA64_GENERIC) += bootloader
 
+CFLAGS := $(CFLAGS) $(CFLAGS_KERNEL)
+
 all: $(targets-y)
...
@@ -64,12 +64,13 @@ if [ "$CONFIG_MCKINLEY" = "y" ]; then
 	fi
 fi
 
-if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ]; then
+if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ];
+then
 	bool '  Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
 	define_bool CONFIG_PM y
 fi
 
 if [ "$CONFIG_IA64_SGI_SN1" = "y" -o "$CONFIG_IA64_SGI_SN2" = "y" ]; then
 	define_bool CONFIG_IA64_SGI_SN y
 	bool '  Enable extra debugging code' CONFIG_IA64_SGI_SN_DEBUG
 	bool '  Enable SGI Medusa Simulator Support' CONFIG_IA64_SGI_SN_SIM
...@@ -99,21 +100,20 @@ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF ...@@ -99,21 +100,20 @@ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
source drivers/acpi/Config.in
source drivers/acpi/Config.in bool 'PCI support' CONFIG_PCI
source drivers/pci/Config.in
bool 'PCI support' CONFIG_PCI bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
source drivers/pci/Config.in if [ "$CONFIG_HOTPLUG" = "y" ]; then
source drivers/hotplug/Config.in
bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG source drivers/pcmcia/Config.in
if [ "$CONFIG_HOTPLUG" = "y" ]; then else
source drivers/pcmcia/Config.in define_bool CONFIG_PCMCIA n
else fi
define_bool CONFIG_PCMCIA n
fi
source drivers/parport/Config.in
source drivers/parport/Config.in
fi # !HP_SIM fi # !HP_SIM
endmenu endmenu
...@@ -123,39 +123,26 @@ if [ "$CONFIG_NET" = "y" ]; then ...@@ -123,39 +123,26 @@ if [ "$CONFIG_NET" = "y" ]; then
fi fi
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
source drivers/mtd/Config.in
source drivers/pnp/Config.in
source drivers/block/Config.in
source drivers/ieee1394/Config.in
source drivers/message/i2o/Config.in
source drivers/md/Config.in
source drivers/message/fusion/Config.in
source drivers/mtd/Config.in mainmenu_option next_comment
source drivers/pnp/Config.in comment 'ATA/ATAPI/MFM/RLL support'
source drivers/block/Config.in
source drivers/ieee1394/Config.in
source drivers/message/i2o/Config.in
source drivers/md/Config.in
source drivers/message/fusion/Config.in
mainmenu_option next_comment
comment 'ATA/ATAPI/MFM/RLL support'
tristate 'ATA/ATAPI/MFM/RLL support' CONFIG_IDE
if [ "$CONFIG_IDE" != "n" ]; then
source drivers/ide/Config.in
else
define_bool CONFIG_BLK_DEV_HD n
fi
endmenu
else # ! HP_SIM tristate 'ATA/ATAPI/MFM/RLL support' CONFIG_IDE
mainmenu_option next_comment
comment 'Block devices'
tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
tristate 'RAM disk support' CONFIG_BLK_DEV_RAM if [ "$CONFIG_IDE" != "n" ]; then
if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then source drivers/ide/Config.in
int ' Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096 else
define_bool CONFIG_BLK_DEV_HD n
fi
endmenu
fi fi
endmenu
fi # !HP_SIM
mainmenu_option next_comment mainmenu_option next_comment
comment 'SCSI support' comment 'SCSI support'
...@@ -168,80 +155,83 @@ fi ...@@ -168,80 +155,83 @@ fi
endmenu endmenu
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
if [ "$CONFIG_NET" = "y" ]; then
mainmenu_option next_comment
comment 'Network device support'
bool 'Network device support' CONFIG_NETDEVICES
if [ "$CONFIG_NETDEVICES" = "y" ]; then
source drivers/net/Config.in
fi
endmenu
fi
source net/ax25/Config.in
source drivers/isdn/Config.in
if [ "$CONFIG_NET" = "y" ]; then
mainmenu_option next_comment mainmenu_option next_comment
comment 'Network device support' comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)'
bool 'Network device support' CONFIG_NETDEVICES bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
if [ "$CONFIG_NETDEVICES" = "y" ]; then if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
source drivers/net/Config.in source drivers/cdrom/Config.in
fi fi
endmenu endmenu
fi
source net/ax25/Config.in #
# input before char - char/joystick depends on it. As does USB.
#
source drivers/input/Config.in
source drivers/char/Config.in
source drivers/isdn/Config.in #source drivers/misc/Config.in
mainmenu_option next_comment source drivers/media/Config.in
comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)' else # HP_SIM
bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
source drivers/cdrom/Config.in
fi
endmenu
fi # !HP_SIM
#
# input before char - char/joystick depends on it. As does USB.
#
source drivers/input/Config.in
source drivers/char/Config.in
#source drivers/misc/Config.in
source drivers/media/Config.in
source fs/Config.in
if [ "$CONFIG_VT" = "y" ]; then
mainmenu_option next_comment mainmenu_option next_comment
comment 'Console drivers' comment 'Block devices'
bool 'VGA text console' CONFIG_VGA_CONSOLE tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
source drivers/video/Config.in dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
if [ "$CONFIG_FB" = "y" ]; then
define_bool CONFIG_PCI_CONSOLE y tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
int ' Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
fi fi
endmenu endmenu
fi fi # HP_SIM
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
mainmenu_option next_comment source fs/Config.in
comment 'Sound'
tristate 'Sound card support' CONFIG_SOUND
if [ "$CONFIG_SOUND" != "n" ]; then
source sound/Config.in
fi
endmenu
source drivers/usb/Config.in if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
if [ "$CONFIG_VT" = "y" ]; then
mainmenu_option next_comment
comment 'Console drivers'
bool 'VGA text console' CONFIG_VGA_CONSOLE
source drivers/video/Config.in
if [ "$CONFIG_FB" = "y" ]; then
define_bool CONFIG_PCI_CONSOLE y
fi
endmenu
fi
source lib/Config.in mainmenu_option next_comment
comment 'Sound'
source net/bluetooth/Config.in tristate 'Sound card support' CONFIG_SOUND
if [ "$CONFIG_SOUND" != "n" ]; then
source sound/Config.in
fi
endmenu
source drivers/usb/Config.in
source lib/Config.in
source net/bluetooth/Config.in
 fi # !HP_SIM
 
 if [ "$CONFIG_IA64_HP_SIM" != "n" -o "$CONFIG_IA64_GENERIC" != "n" ]; then
-	source arch/ia64/hp/Config.in
+	source arch/ia64/hp/sim/Config.in
 fi
 
 mainmenu_option next_comment
 comment 'Kernel hacking'
@@ -255,7 +245,14 @@ if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
 	bool '  Disable VHPT' CONFIG_DISABLE_VHPT
 	bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
 
-	bool '  Early printk support (requires VGA!)' CONFIG_IA64_EARLY_PRINTK
+	bool '  Early printk support' CONFIG_IA64_EARLY_PRINTK
+	if [ "$CONFIG_IA64_EARLY_PRINTK" != "n" ]; then
+		bool '    Early printk on MMIO serial port' CONFIG_IA64_EARLY_PRINTK_UART
+		if [ "$CONFIG_IA64_EARLY_PRINTK_UART" != "n" ]; then
+			hex '      UART MMIO base address' CONFIG_IA64_EARLY_PRINTK_UART_BASE ff5e0000
+		fi
+		bool '    Early printk on VGA' CONFIG_IA64_EARLY_PRINTK_VGA
+	fi
 
 	bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
 	bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK
 	bool '  Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG
...
@@ -30,12 +30,12 @@ static void simcons_write (struct console *, const char *, unsigned);
 static kdev_t simcons_console_device (struct console *);
 
 struct console hpsim_cons = {
-	name:		"simcons",
-	write:		simcons_write,
-	device:		simcons_console_device,
-	setup:		simcons_init,
-	flags:		CON_PRINTBUFFER,
-	index:		-1,
+	.name =		"simcons",
+	.write =	simcons_write,
+	.device =	simcons_console_device,
+	.setup =	simcons_init,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
 };
 
 static int
...
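This hunk, and several like it further down (hpsim_irq.c, init_task.c, irq_ia64.c, irq_lsapic.c, mca.c), are a mechanical conversion from the GNU-specific "field: value" initializer extension to standard C99 designated initializers. The two spellings, as a minimal stand-alone illustration:

	struct ops {
		void (*write)(const char *s);
		int index;
	};

	static void my_write(const char *s) { (void) s; }

	/* Old GCC extension, as on the left-hand side of the hunk:
	 *	static struct ops o = { write: my_write, index: -1 };
	 * C99 designated initializers, as on the right-hand side: */
	static struct ops o = { .write = my_write, .index = -1 };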
@@ -22,14 +22,14 @@ hpsim_irq_noop (unsigned int irq)
 }
 
 static struct hw_interrupt_type irq_type_hp_sim = {
-	typename:	"hpsim",
-	startup:	hpsim_irq_startup,
-	shutdown:	hpsim_irq_noop,
-	enable:		hpsim_irq_noop,
-	disable:	hpsim_irq_noop,
-	ack:		hpsim_irq_noop,
-	end:		hpsim_irq_noop,
-	set_affinity:	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
+	.typename =	"hpsim",
+	.startup =	hpsim_irq_startup,
+	.shutdown =	hpsim_irq_noop,
+	.enable =	hpsim_irq_noop,
+	.disable =	hpsim_irq_noop,
+	.ack =		hpsim_irq_noop,
+	.end =		hpsim_irq_noop,
+	.set_affinity =	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
 };
 
 void __init
...
 /*
  * Platform dependent support for HP simulator.
  *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
  */
+#include <linux/console.h>
 #include <linux/init.h>
+#include <linux/kdev_t.h>
 #include <linux/kernel.h>
+#include <linux/major.h>
 #include <linux/param.h>
+#include <linux/root_dev.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/console.h>
-#include <linux/root_dev.h>
 
 #include <asm/delay.h>
 #include <asm/irq.h>
@@ -55,5 +56,5 @@ hpsim_setup (char **cmdline_p)
 {
 	ROOT_DEV = Root_SDA1;	/* default to first SCSI drive */
 
-	register_console (&hpsim_cons);
+	register_console(&hpsim_cons);
 }
...@@ -62,7 +62,9 @@ struct disk_stat { ...@@ -62,7 +62,9 @@ struct disk_stat {
extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr); extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
static int desc[8] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static int desc[16] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
static struct queue_entry { static struct queue_entry {
Scsi_Cmnd *sc; Scsi_Cmnd *sc;
...@@ -148,9 +150,9 @@ simscsi_biosparam (Disk *disk, struct block_device *n, int ip[]) ...@@ -148,9 +150,9 @@ simscsi_biosparam (Disk *disk, struct block_device *n, int ip[])
{ {
int size = disk->capacity; int size = disk->capacity;
ip[0] = 64; ip[0] = 64; /* heads */
ip[1] = 32; ip[1] = 32; /* sectors */
ip[2] = size >> 11; ip[2] = size >> 11; /* cylinders */
return 0; return 0;
} }
...@@ -229,6 +231,29 @@ simscsi_readwrite6 (Scsi_Cmnd *sc, int mode) ...@@ -229,6 +231,29 @@ simscsi_readwrite6 (Scsi_Cmnd *sc, int mode)
simscsi_readwrite(sc, mode, offset, sc->cmnd[4]*512); simscsi_readwrite(sc, mode, offset, sc->cmnd[4]*512);
} }
static size_t
simscsi_get_disk_size (int fd)
{
struct disk_stat stat;
size_t bit, sectors = 0;
struct disk_req req;
char buf[512];
/*
* This is a bit kludgey: the simulator doesn't provide a direct way of determining
* the disk size, so we do a binary search, assuming a maximum disk size of 4GB.
*/
for (bit = (4UL << 30)/512; bit != 0; bit >>= 1) {
req.addr = __pa(&buf);
req.len = sizeof(buf);
ia64_ssc(fd, 1, __pa(&req), ((sectors | bit) - 1)*512, SSC_READ);
stat.fd = fd;
ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);
if (stat.count == sizeof(buf))
sectors |= bit;
}
return sectors - 1; /* return last valid sector number */
}
static void static void
simscsi_readwrite10 (Scsi_Cmnd *sc, int mode) simscsi_readwrite10 (Scsi_Cmnd *sc, int mode)
...@@ -247,6 +272,7 @@ int ...@@ -247,6 +272,7 @@ int
simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *)) simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
{ {
char fname[MAX_ROOT_LEN+16]; char fname[MAX_ROOT_LEN+16];
size_t disk_size;
char *buf; char *buf;
#if DEBUG_SIMSCSI #if DEBUG_SIMSCSI
register long sp asm ("sp"); register long sp asm ("sp");
...@@ -258,15 +284,15 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *)) ...@@ -258,15 +284,15 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
sc->result = DID_BAD_TARGET << 16; sc->result = DID_BAD_TARGET << 16;
sc->scsi_done = done; sc->scsi_done = done;
if (sc->target <= 7 && sc->lun == 0) { if (sc->target <= 15 && sc->lun == 0) {
switch (sc->cmnd[0]) { switch (sc->cmnd[0]) {
case INQUIRY: case INQUIRY:
if (sc->request_bufflen < 35) { if (sc->request_bufflen < 35) {
break; break;
} }
sprintf (fname, "%s%c", simscsi_root, 'a' + sc->target); sprintf (fname, "%s%c", simscsi_root, 'a' + sc->target);
desc[sc->target] = ia64_ssc (__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS, desc[sc->target] = ia64_ssc(__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS,
0, 0, SSC_OPEN); 0, 0, SSC_OPEN);
if (desc[sc->target] < 0) { if (desc[sc->target] < 0) {
/* disk doesn't exist... */ /* disk doesn't exist... */
break; break;
...@@ -319,11 +345,13 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *)) ...@@ -319,11 +345,13 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
} }
buf = sc->request_buffer; buf = sc->request_buffer;
disk_size = simscsi_get_disk_size(desc[sc->target]);
/* pretend to be a 1GB disk (partition table contains real stuff): */ /* pretend to be a 1GB disk (partition table contains real stuff): */
buf[0] = 0x00; buf[0] = (disk_size >> 24) & 0xff;
buf[1] = 0x1f; buf[1] = (disk_size >> 16) & 0xff;
buf[2] = 0xff; buf[2] = (disk_size >> 8) & 0xff;
buf[3] = 0xff; buf[3] = (disk_size >> 0) & 0xff;
/* set block size of 512 bytes: */ /* set block size of 512 bytes: */
buf[4] = 0; buf[4] = 0;
buf[5] = 0; buf[5] = 0;
......
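The READ CAPACITY change above replaces the hard-coded 1GB answer with the size probed by simscsi_get_disk_size(). For reference, the byte packing it performs is plain big-endian SCSI encoding: the last logical block address goes in bytes 0-3 and the block size in bytes 4-7. A stand-alone sketch of the same packing:

	/* Pack a 32-bit value big-endian into a SCSI response buffer. */
	static void put_be32(unsigned char *buf, unsigned int val)
	{
		buf[0] = (val >> 24) & 0xff;
		buf[1] = (val >> 16) & 0xff;
		buf[2] = (val >>  8) & 0xff;
		buf[3] = (val >>  0) & 0xff;
	}

	/* Usage, mirroring the patch: last LBA in bytes 0-3, 512-byte blocks in 4-7.
	 *	put_be32(buf, disk_size);
	 *	put_be32(buf + 4, 512);
	 */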
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
* *
* 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close(). * 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close().
* 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c. * 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c.
* 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -31,6 +32,7 @@ ...@@ -31,6 +32,7 @@
#include <linux/serialP.h> #include <linux/serialP.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#ifdef CONFIG_KDB #ifdef CONFIG_KDB
...@@ -61,6 +63,7 @@ extern void ia64_ssc_connect_irq (long intr, long irq); ...@@ -61,6 +63,7 @@ extern void ia64_ssc_connect_irq (long intr, long irq);
static char *serial_name = "SimSerial driver"; static char *serial_name = "SimSerial driver";
static char *serial_version = "0.6"; static char *serial_version = "0.6";
static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
/* /*
* This has been extracted from asm/serial.h. We need one eventually but * This has been extracted from asm/serial.h. We need one eventually but
...@@ -232,14 +235,14 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch) ...@@ -232,14 +235,14 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch)
if (!tty || !info->xmit.buf) return; if (!tty || !info->xmit.buf) return;
save_flags(flags); cli(); spin_lock_irqsave(&serial_lock, flags);
if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
return; return;
} }
info->xmit.buf[info->xmit.head] = ch; info->xmit.buf[info->xmit.head] = ch;
info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1); info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
} }
static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
...@@ -247,7 +250,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) ...@@ -247,7 +250,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
int count; int count;
unsigned long flags; unsigned long flags;
save_flags(flags); cli(); spin_lock_irqsave(&serial_lock, flags);
if (info->x_char) { if (info->x_char) {
char c = info->x_char; char c = info->x_char;
...@@ -290,7 +293,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) ...@@ -290,7 +293,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
info->xmit.tail += count; info->xmit.tail += count;
} }
out: out:
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
} }
static void rs_flush_chars(struct tty_struct *tty) static void rs_flush_chars(struct tty_struct *tty)
...@@ -314,7 +317,6 @@ static int rs_write(struct tty_struct * tty, int from_user, ...@@ -314,7 +317,6 @@ static int rs_write(struct tty_struct * tty, int from_user,
if (!tty || !info->xmit.buf || !tmp_buf) return 0; if (!tty || !info->xmit.buf || !tmp_buf) return 0;
save_flags(flags);
if (from_user) { if (from_user) {
down(&tmp_buf_sem); down(&tmp_buf_sem);
while (1) { while (1) {
...@@ -331,21 +333,26 @@ static int rs_write(struct tty_struct * tty, int from_user, ...@@ -331,21 +333,26 @@ static int rs_write(struct tty_struct * tty, int from_user,
ret = -EFAULT; ret = -EFAULT;
break; break;
} }
cli();
c1 = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); spin_lock_irqsave(&serial_lock, flags);
if (c1 < c) {
c = c1; c1 = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail,
memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c); SERIAL_XMIT_SIZE);
info->xmit.head = ((info->xmit.head + c) & if (c1 < c)
(SERIAL_XMIT_SIZE-1)); c = c1;
restore_flags(flags); memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c);
info->xmit.head = ((info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1));
}
spin_unlock_irqrestore(&serial_lock, flags);
buf += c; buf += c;
count -= c; count -= c;
ret += c; ret += c;
} }
up(&tmp_buf_sem); up(&tmp_buf_sem);
} else { } else {
cli(); spin_lock_irqsave(&serial_lock, flags);
while (1) { while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count < c) if (count < c)
...@@ -360,7 +367,7 @@ static int rs_write(struct tty_struct * tty, int from_user, ...@@ -360,7 +367,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
count -= c; count -= c;
ret += c; ret += c;
} }
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
} }
/* /*
* Hey, we transmit directly from here in our case * Hey, we transmit directly from here in our case
...@@ -391,9 +398,9 @@ static void rs_flush_buffer(struct tty_struct *tty) ...@@ -391,9 +398,9 @@ static void rs_flush_buffer(struct tty_struct *tty)
struct async_struct *info = (struct async_struct *)tty->driver_data; struct async_struct *info = (struct async_struct *)tty->driver_data;
unsigned long flags; unsigned long flags;
save_flags(flags); cli(); spin_lock_irqsave(&serial_lock, flags);
info->xmit.head = info->xmit.tail = 0; info->xmit.head = info->xmit.tail = 0;
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->write_wait);
...@@ -566,44 +573,45 @@ static void shutdown(struct async_struct * info) ...@@ -566,44 +573,45 @@ static void shutdown(struct async_struct * info)
state->irq); state->irq);
#endif #endif
save_flags(flags); cli(); /* Disable interrupts */ spin_lock_irqsave(&serial_lock, flags);
{
/*
* First unlink the serial port from the IRQ chain...
*/
if (info->next_port)
info->next_port->prev_port = info->prev_port;
if (info->prev_port)
info->prev_port->next_port = info->next_port;
else
IRQ_ports[state->irq] = info->next_port;
/* /*
* First unlink the serial port from the IRQ chain... * Free the IRQ, if necessary
*/ */
if (info->next_port) if (state->irq && (!IRQ_ports[state->irq] ||
info->next_port->prev_port = info->prev_port; !IRQ_ports[state->irq]->next_port)) {
if (info->prev_port) if (IRQ_ports[state->irq]) {
info->prev_port->next_port = info->next_port; free_irq(state->irq, NULL);
else retval = request_irq(state->irq, rs_interrupt_single,
IRQ_ports[state->irq] = info->next_port; IRQ_T(info), "serial", NULL);
if (retval)
printk("serial shutdown: request_irq: error %d"
" Couldn't reacquire IRQ.\n", retval);
} else
free_irq(state->irq, NULL);
}
/* if (info->xmit.buf) {
* Free the IRQ, if necessary free_page((unsigned long) info->xmit.buf);
*/ info->xmit.buf = 0;
if (state->irq && (!IRQ_ports[state->irq] || }
!IRQ_ports[state->irq]->next_port)) {
if (IRQ_ports[state->irq]) {
free_irq(state->irq, NULL);
retval = request_irq(state->irq, rs_interrupt_single,
IRQ_T(info), "serial", NULL);
if (retval) if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
printk("serial shutdown: request_irq: error %d"
" Couldn't reacquire IRQ.\n", retval);
} else
free_irq(state->irq, NULL);
}
if (info->xmit.buf) { info->flags &= ~ASYNC_INITIALIZED;
free_page((unsigned long) info->xmit.buf);
info->xmit.buf = 0;
} }
spin_unlock_irqrestore(&serial_lock, flags);
if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
info->flags &= ~ASYNC_INITIALIZED;
restore_flags(flags);
} }
/* /*
...@@ -626,14 +634,13 @@ static void rs_close(struct tty_struct *tty, struct file * filp) ...@@ -626,14 +634,13 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
state = info->state; state = info->state;
save_flags(flags); cli(); spin_lock_irqsave(&serial_lock, flags);
if (tty_hung_up_p(filp)) { if (tty_hung_up_p(filp)) {
#ifdef SIMSERIAL_DEBUG #ifdef SIMSERIAL_DEBUG
printk("rs_close: hung_up\n"); printk("rs_close: hung_up\n");
#endif #endif
MOD_DEC_USE_COUNT; MOD_DEC_USE_COUNT;
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
return; return;
} }
#ifdef SIMSERIAL_DEBUG #ifdef SIMSERIAL_DEBUG
...@@ -658,11 +665,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp) ...@@ -658,11 +665,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
} }
if (state->count) { if (state->count) {
MOD_DEC_USE_COUNT; MOD_DEC_USE_COUNT;
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
return; return;
} }
info->flags |= ASYNC_CLOSING; info->flags |= ASYNC_CLOSING;
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
/* /*
* Now we wait for the transmit buffer to clear; and we notify * Now we wait for the transmit buffer to clear; and we notify
...@@ -770,7 +777,7 @@ startup(struct async_struct *info) ...@@ -770,7 +777,7 @@ startup(struct async_struct *info)
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
save_flags(flags); cli(); spin_lock_irqsave(&serial_lock, flags);
if (info->flags & ASYNC_INITIALIZED) { if (info->flags & ASYNC_INITIALIZED) {
free_page(page); free_page(page);
...@@ -851,11 +858,11 @@ startup(struct async_struct *info) ...@@ -851,11 +858,11 @@ startup(struct async_struct *info)
} }
info->flags |= ASYNC_INITIALIZED; info->flags |= ASYNC_INITIALIZED;
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
return 0; return 0;
errout: errout:
restore_flags(flags); spin_unlock_irqrestore(&serial_lock, flags);
return retval; return retval;
} }
......
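The simserial rework above is one instance of a pattern running through this merge: the old global save_flags()/cli()/restore_flags() interrupt disabling is replaced by a driver-local spinlock taken with local interrupts masked. The shape of the conversion in isolation, using the serial_lock introduced near the top of the driver:

	static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;

	static void touch_shared_state(void)
	{
		unsigned long flags;

		/* was:  save_flags(flags); cli();  ...  restore_flags(flags); */
		spin_lock_irqsave(&serial_lock, flags);
		/* ... manipulate the transmit ring or port list here ... */
		spin_unlock_irqrestore(&serial_lock, flags);
	}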
#define MACHVEC_PLATFORM_NAME hpzx1 #define MACHVEC_PLATFORM_NAME hpzx1
#include <asm/machvec_init.h> #include <asm/machvec_init.h>
#define MACHVEC_PLATFORM_NAME hpzx1
#include <asm/machvec_init.h>
@@ -67,7 +67,7 @@ ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int
 }
 
 static struct vm_operations_struct ia32_shared_page_vm_ops = {
-	nopage:	ia32_install_shared_page
+	.nopage =ia32_install_shared_page
 };
 
 void
...
@@ -30,6 +30,8 @@
 #include <asm/ia32.h>
 
 #include <../drivers/char/drm/drm.h>
+#include <../drivers/char/drm/mga_drm.h>
+#include <../drivers/char/drm/i810_drm.h>
 
 #define IOCTL_NR(a)	((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
...
...@@ -125,9 +125,79 @@ efi_gettimeofday (struct timeval *tv) ...@@ -125,9 +125,79 @@ efi_gettimeofday (struct timeval *tv)
tv->tv_usec = tm.nanosecond / 1000; tv->tv_usec = tm.nanosecond / 1000;
} }
static int
is_available_memory (efi_memory_desc_t *md)
{
if (!(md->attribute & EFI_MEMORY_WB))
return 0;
switch (md->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
return 1;
}
return 0;
}
/*
* Trim descriptor MD so its starts at address START_ADDR. If the descriptor covers
* memory that is normally available to the kernel, issue a warning that some memory
* is being ignored.
*/
static void
trim_bottom (efi_memory_desc_t *md, u64 start_addr)
{
u64 num_skipped_pages;
if (md->phys_addr >= start_addr || !md->num_pages)
return;
num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
if (num_skipped_pages > md->num_pages)
num_skipped_pages = md->num_pages;
if (is_available_memory(md))
printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
"at 0x%lx\n", __FUNCTION__,
(num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
md->phys_addr, start_addr - IA64_GRANULE_SIZE);
/*
* NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
* descriptor list to become unsorted. In such a case, md->num_pages will be
* zero, so the Right Thing will happen.
*/
md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
md->num_pages -= num_skipped_pages;
}
static void
trim_top (efi_memory_desc_t *md, u64 end_addr)
{
u64 num_dropped_pages, md_end_addr;
md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
if (md_end_addr <= end_addr || !md->num_pages)
return;
num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
if (num_dropped_pages > md->num_pages)
num_dropped_pages = md->num_pages;
if (is_available_memory(md))
printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
"at 0x%lx\n", __FUNCTION__,
(num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
md->phys_addr, end_addr);
md->num_pages -= num_dropped_pages;
}
/* /*
* Walks the EFI memory map and calls CALLBACK once for each EFI * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
* memory descriptor that has memory that is available for OS use. * has memory that is available for OS use.
*/ */
void void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg) efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
...@@ -137,9 +207,9 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -137,9 +207,9 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
u64 start; u64 start;
u64 end; u64 end;
} prev, curr; } prev, curr;
void *efi_map_start, *efi_map_end, *p; void *efi_map_start, *efi_map_end, *p, *q;
efi_memory_desc_t *md; efi_memory_desc_t *md, *check_md;
u64 efi_desc_size, start, end; u64 efi_desc_size, start, end, granule_addr, first_non_wb_addr = 0;
efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
...@@ -147,24 +217,56 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -147,24 +217,56 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p; md = p;
switch (md->type) {
case EFI_LOADER_CODE: /* skip over non-WB memory descriptors; that's all we're interested in... */
case EFI_LOADER_DATA: if (!(md->attribute & EFI_MEMORY_WB))
case EFI_BOOT_SERVICES_CODE: continue;
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY: if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > first_non_wb_addr) {
if (!(md->attribute & EFI_MEMORY_WB)) /*
continue; * Search for the next run of contiguous WB memory. Start search
* at first granule boundary covered by md.
*/
granule_addr = ((md->phys_addr + IA64_GRANULE_SIZE - 1)
& -IA64_GRANULE_SIZE);
first_non_wb_addr = granule_addr;
for (q = p; q < efi_map_end; q += efi_desc_size) {
check_md = q;
if (check_md->attribute & EFI_MEMORY_WB)
trim_bottom(md, granule_addr);
if (check_md->phys_addr < granule_addr)
continue;
if (!(check_md->attribute & EFI_MEMORY_WB))
break; /* hit a non-WB region; stop search */
if (check_md->phys_addr != first_non_wb_addr)
break; /* hit a memory hole; stop search */
first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
}
/* round it down to the previous granule-boundary: */
first_non_wb_addr &= -IA64_GRANULE_SIZE;
if (!(first_non_wb_addr > granule_addr))
continue; /* couldn't find enough contiguous memory */
}
/* BUG_ON((md->phys_addr >> IA64_GRANULE_SHIFT) < first_non_wb_addr); */
trim_top(md, first_non_wb_addr);
if (is_available_memory(md)) {
if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) { if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
if (md->phys_addr > mem_limit) if (md->phys_addr > mem_limit)
continue; continue;
md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT; md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT;
} }
if (md->num_pages == 0) {
printk("efi_memmap_walk: ignoring empty region at 0x%lx", if (md->num_pages == 0)
md->phys_addr);
continue; continue;
}
curr.start = PAGE_OFFSET + md->phys_addr; curr.start = PAGE_OFFSET + md->phys_addr;
curr.end = curr.start + (md->num_pages << EFI_PAGE_SHIFT); curr.end = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
...@@ -187,10 +289,6 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -187,10 +289,6 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
prev = curr; prev = curr;
} }
} }
break;
default:
continue;
} }
} }
if (prev_valid) { if (prev_valid) {
...@@ -268,8 +366,9 @@ efi_map_pal_code (void) ...@@ -268,8 +366,9 @@ efi_map_pal_code (void)
*/ */
psr = ia64_clear_ic(); psr = ia64_clear_ic();
ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask, ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT); pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
ia64_set_psr(psr); IA64_GRANULE_SHIFT);
ia64_set_psr(psr); /* restore psr */
ia64_srlz_i(); ia64_srlz_i();
} }
} }
...@@ -347,6 +446,9 @@ efi_init (void) ...@@ -347,6 +446,9 @@ efi_init (void)
} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
efi.sal_systab = __va(config_tables[i].table); efi.sal_systab = __va(config_tables[i].table);
printk(" SALsystab=0x%lx", config_tables[i].table); printk(" SALsystab=0x%lx", config_tables[i].table);
} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
efi.hcdp = __va(config_tables[i].table);
printk(" HCDP=0x%lx", config_tables[i].table);
} }
} }
printk("\n"); printk("\n");
...@@ -376,7 +478,7 @@ efi_init (void) ...@@ -376,7 +478,7 @@ efi_init (void)
md = p; md = p;
printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n", printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
i, md->type, md->attribute, md->phys_addr, i, md->type, md->attribute, md->phys_addr,
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1, md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
md->num_pages >> (20 - EFI_PAGE_SHIFT)); md->num_pages >> (20 - EFI_PAGE_SHIFT));
} }
} }
......
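The new trim_bottom()/trim_top() logic above clips EFI memory descriptors so that only whole, contiguous write-back granules are handed to the kernel. The alignment idiom it leans on is worth spelling out: because IA64_GRANULE_SIZE is a power of two, masking with -IA64_GRANULE_SIZE (equivalently ~(IA64_GRANULE_SIZE - 1)) rounds an address down to a granule boundary, and adding IA64_GRANULE_SIZE - 1 first rounds it up. A compact sketch:

	/* Round an address down, respectively up, to an IA-64 granule boundary. */
	#define GRANULE_ROUND_DOWN(addr) \
		((addr) & ~((u64) IA64_GRANULE_SIZE - 1))
	#define GRANULE_ROUND_UP(addr) \
		(((addr) + IA64_GRANULE_SIZE - 1) & ~((u64) IA64_GRANULE_SIZE - 1))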
@@ -175,6 +175,7 @@ GLOBAL_ENTRY(ia64_switch_to)
 (p6)	srlz.d
 	ld8 sp=[r21]			// load kernel stack pointer of new task
 	mov IA64_KR(CURRENT)=r20	// update "current" application register
+	mov r8=r13			// return pointer to previously running task
 	mov r13=in0			// set "current" pointer
 	;;
 	DO_LOAD_SWITCH_STACK
...
@@ -88,12 +88,6 @@ EXPORT_SYMBOL(ia64_cpu_to_sapicid);
 #include <asm/smplock.h>
 EXPORT_SYMBOL(kernel_flag);
 
-/* #include <asm/system.h> */
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
-
 #else /* !CONFIG_SMP */
 
 EXPORT_SYMBOL(__flush_tlb_all);
...
@@ -34,8 +34,8 @@ union init_thread {
 	} s;
 	unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
 } init_thread_union __attribute__((section(".data.init_task"))) = {{
-	task:		INIT_TASK(init_thread_union.s.task),
-	thread_info:	INIT_THREAD_INFO(init_thread_union.s.thread_info)
+	.task =		INIT_TASK(init_thread_union.s.task),
+	.thread_info =	INIT_THREAD_INFO(init_thread_union.s.thread_info)
 }};
 
 asm (".global init_task; init_task = init_thread_union");
...@@ -88,7 +88,7 @@ static struct { ...@@ -88,7 +88,7 @@ static struct {
static struct iosapic_irq { static struct iosapic_irq {
char *addr; /* base address of IOSAPIC */ char *addr; /* base address of IOSAPIC */
unsigned char base_irq; /* first irq assigned to this IOSAPIC */ unsigned int base_irq; /* first irq assigned to this IOSAPIC */
char pin; /* IOSAPIC pin (-1 => not an IOSAPIC irq) */ char pin; /* IOSAPIC pin (-1 => not an IOSAPIC irq) */
unsigned char dmode : 3; /* delivery mode (see iosapic.h) */ unsigned char dmode : 3; /* delivery mode (see iosapic.h) */
unsigned char polarity : 1; /* interrupt polarity (see iosapic.h) */ unsigned char polarity : 1; /* interrupt polarity (see iosapic.h) */
...@@ -97,9 +97,9 @@ static struct iosapic_irq { ...@@ -97,9 +97,9 @@ static struct iosapic_irq {
static struct iosapic { static struct iosapic {
char *addr; /* base address of IOSAPIC */ char *addr; /* base address of IOSAPIC */
unsigned char pcat_compat; /* 8259 compatibility flag */ unsigned int base_irq; /* first irq assigned to this IOSAPIC */
unsigned char base_irq; /* first irq assigned to this IOSAPIC */
unsigned short max_pin; /* max input pin supported in this IOSAPIC */ unsigned short max_pin; /* max input pin supported in this IOSAPIC */
unsigned char pcat_compat; /* 8259 compatibility flag */
} iosapic_lists[256] __initdata; } iosapic_lists[256] __initdata;
static int num_iosapic = 0; static int num_iosapic = 0;
...@@ -160,6 +160,10 @@ set_rte (unsigned int vector, unsigned long dest) ...@@ -160,6 +160,10 @@ set_rte (unsigned int vector, unsigned long dest)
int pin; int pin;
char redir; char redir;
#ifdef DEBUG_IRQ_ROUTING
printk(KERN_DEBUG "set_rte: routing vector 0x%02x to 0x%lx\n", vector, dest);
#endif
pin = iosapic_irq[vector].pin; pin = iosapic_irq[vector].pin;
if (pin < 0) if (pin < 0)
return; /* not an IOSAPIC interrupt */ return; /* not an IOSAPIC interrupt */
...@@ -322,14 +326,14 @@ iosapic_end_level_irq (unsigned int irq) ...@@ -322,14 +326,14 @@ iosapic_end_level_irq (unsigned int irq)
#define iosapic_ack_level_irq nop #define iosapic_ack_level_irq nop
struct hw_interrupt_type irq_type_iosapic_level = { struct hw_interrupt_type irq_type_iosapic_level = {
typename: "IO-SAPIC-level", .typename = "IO-SAPIC-level",
startup: iosapic_startup_level_irq, .startup = iosapic_startup_level_irq,
shutdown: iosapic_shutdown_level_irq, .shutdown = iosapic_shutdown_level_irq,
enable: iosapic_enable_level_irq, .enable = iosapic_enable_level_irq,
disable: iosapic_disable_level_irq, .disable = iosapic_disable_level_irq,
ack: iosapic_ack_level_irq, .ack = iosapic_ack_level_irq,
end: iosapic_end_level_irq, .end = iosapic_end_level_irq,
set_affinity: iosapic_set_affinity .set_affinity = iosapic_set_affinity
}; };
/* /*
...@@ -366,14 +370,14 @@ iosapic_ack_edge_irq (unsigned int irq) ...@@ -366,14 +370,14 @@ iosapic_ack_edge_irq (unsigned int irq)
#define iosapic_end_edge_irq nop #define iosapic_end_edge_irq nop
struct hw_interrupt_type irq_type_iosapic_edge = { struct hw_interrupt_type irq_type_iosapic_edge = {
typename: "IO-SAPIC-edge", .typename = "IO-SAPIC-edge",
startup: iosapic_startup_edge_irq, .startup = iosapic_startup_edge_irq,
shutdown: iosapic_disable_edge_irq, .shutdown = iosapic_disable_edge_irq,
enable: iosapic_enable_edge_irq, .enable = iosapic_enable_edge_irq,
disable: iosapic_disable_edge_irq, .disable = iosapic_disable_edge_irq,
ack: iosapic_ack_edge_irq, .ack = iosapic_ack_edge_irq,
end: iosapic_end_edge_irq, .end = iosapic_end_edge_irq,
set_affinity: iosapic_set_affinity .set_affinity = iosapic_set_affinity
}; };
unsigned int unsigned int
...@@ -406,7 +410,7 @@ iosapic_reassign_vector (int vector) ...@@ -406,7 +410,7 @@ iosapic_reassign_vector (int vector)
|| iosapic_irq[vector].polarity || iosapic_irq[vector].trigger) || iosapic_irq[vector].polarity || iosapic_irq[vector].trigger)
{ {
new_vector = ia64_alloc_irq(); new_vector = ia64_alloc_irq();
printk("Reassigning Vector 0x%x to 0x%x\n", vector, new_vector); printk("Reassigning vector 0x%x to 0x%x\n", vector, new_vector);
memcpy (&iosapic_irq[new_vector], &iosapic_irq[vector], memcpy (&iosapic_irq[new_vector], &iosapic_irq[vector],
sizeof(struct iosapic_irq)); sizeof(struct iosapic_irq));
memset (&iosapic_irq[vector], 0, sizeof(struct iosapic_irq)); memset (&iosapic_irq[vector], 0, sizeof(struct iosapic_irq));
...@@ -422,6 +426,7 @@ register_irq (u32 global_vector, int vector, int pin, unsigned char delivery, ...@@ -422,6 +426,7 @@ register_irq (u32 global_vector, int vector, int pin, unsigned char delivery,
irq_desc_t *idesc; irq_desc_t *idesc;
struct hw_interrupt_type *irq_type; struct hw_interrupt_type *irq_type;
gsi_to_vector(global_vector) = vector;
iosapic_irq[vector].pin = pin; iosapic_irq[vector].pin = pin;
iosapic_irq[vector].polarity = polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW; iosapic_irq[vector].polarity = polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
iosapic_irq[vector].dmode = delivery; iosapic_irq[vector].dmode = delivery;
...@@ -640,7 +645,7 @@ iosapic_init_pci_irq (void) ...@@ -640,7 +645,7 @@ iosapic_init_pci_irq (void)
unsigned int irq; unsigned int irq;
char *addr; char *addr;
if (0 != acpi_get_prt(&pci_irq.route, &pci_irq.num_routes)) if (acpi_get_prt(&pci_irq.route, &pci_irq.num_routes))
return; return;
for (i = 0; i < pci_irq.num_routes; i++) { for (i = 0; i < pci_irq.num_routes; i++) {
...@@ -679,11 +684,10 @@ iosapic_init_pci_irq (void) ...@@ -679,11 +684,10 @@ iosapic_init_pci_irq (void)
pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin, pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin,
iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector); iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector);
#endif #endif
/* /*
* Forget not to program the IOSAPIC RTE per ACPI _PRT * NOTE: The IOSAPIC RTE will be programmed in iosapic_pci_fixup(). It
* needs to be done there to ensure PCI hotplug works right.
*/ */
set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
} }
} }
...@@ -757,10 +761,11 @@ iosapic_pci_fixup (int phase) ...@@ -757,10 +761,11 @@ iosapic_pci_fixup (int phase)
if (!(smp_int_redirect & SMP_IRQ_REDIRECTION)) { if (!(smp_int_redirect & SMP_IRQ_REDIRECTION)) {
static int cpu_index = 0; static int cpu_index = 0;
set_rte(vector, cpu_physical_id(cpu_index) & 0xffff); while (!cpu_online(cpu_index))
if (++cpu_index >= NR_CPUS)
cpu_index = 0;
for (cpu_index++; !cpu_online(cpu_index % NR_CPUS); cpu_index++); set_rte(vector, cpu_physical_id(cpu_index) & 0xffff);
cpu_index %= NR_CPUS;
} else { } else {
/* /*
* Direct the interrupt vector to the current cpu, * Direct the interrupt vector to the current cpu,
......
...@@ -36,6 +36,10 @@ ...@@ -36,6 +36,10 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/system.h> #include <asm/system.h>
#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
#define IRQ_DEBUG 0 #define IRQ_DEBUG 0
/* default base addr of IPI table */ /* default base addr of IPI table */
...@@ -50,6 +54,11 @@ __u8 isa_irq_to_vector_map[16] = { ...@@ -50,6 +54,11 @@ __u8 isa_irq_to_vector_map[16] = {
0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
}; };
/*
* GSI to IA-64 vector translation table.
*/
__u8 gsi_to_vector_map[255];
int int
ia64_alloc_irq (void) ia64_alloc_irq (void)
{ {
...@@ -144,9 +153,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) ...@@ -144,9 +153,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs); extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
static struct irqaction ipi_irqaction = { static struct irqaction ipi_irqaction = {
handler: handle_IPI, .handler = handle_IPI,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "IPI" .name = "IPI"
}; };
#endif #endif
...@@ -172,6 +181,9 @@ init_IRQ (void) ...@@ -172,6 +181,9 @@ init_IRQ (void)
register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
perfmon_init_percpu();
#endif #endif
platform_irq_init(); platform_irq_init();
} }
......
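The irq_ia64.c hunks add a flat GSI-to-IA-64-vector translation table that register_irq() fills in. A small standalone model of that lookup is sketched below; treating gsi_to_vector() as a macro over the array is an assumption (its real definition lives in a header outside this diff), and the sample values are made up.

/* Model of the GSI -> vector translation table added above. */
#include <stdio.h>

static unsigned char gsi_to_vector_map[255];

#define gsi_to_vector(gsi)      gsi_to_vector_map[gsi]

int main(void)
{
        /* register_irq() records the assignment when a GSI is programmed */
        gsi_to_vector(16) = 0x30;
        gsi_to_vector(17) = 0x31;

        printf("GSI 16 uses vector 0x%x\n", gsi_to_vector(16));
        return 0;
}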
...@@ -27,12 +27,12 @@ lsapic_noop (unsigned int irq) ...@@ -27,12 +27,12 @@ lsapic_noop (unsigned int irq)
} }
struct hw_interrupt_type irq_type_ia64_lsapic = { struct hw_interrupt_type irq_type_ia64_lsapic = {
typename: "LSAPIC", .typename = "LSAPIC",
startup: lsapic_noop_startup, .startup = lsapic_noop_startup,
shutdown: lsapic_noop, .shutdown = lsapic_noop,
enable: lsapic_noop, .enable = lsapic_noop,
disable: lsapic_noop, .disable = lsapic_noop,
ack: lsapic_noop, .ack = lsapic_noop,
end: lsapic_noop, .end = lsapic_noop,
set_affinity: (void (*)(unsigned int, unsigned long)) lsapic_noop .set_affinity = (void (*)(unsigned int, unsigned long)) lsapic_noop
}; };
...@@ -11,13 +11,16 @@ ...@@ -11,13 +11,16 @@
struct ia64_machine_vector ia64_mv; struct ia64_machine_vector ia64_mv;
/* /*
* Most platforms use this routine for mapping page frame addresses * Most platforms use this routine for mapping page frame addresses into a memory map
* into a memory map index. * index.
*
* Note: we can't use __pa() because map_nr_dense(X) MUST map to something >= max_mapnr if
* X is outside the identity mapped kernel space.
*/ */
unsigned long unsigned long
map_nr_dense (unsigned long addr) map_nr_dense (unsigned long addr)
{ {
return MAP_NR_DENSE(addr); return (addr - PAGE_OFFSET) >> PAGE_SHIFT;
} }
static struct ia64_machine_vector * static struct ia64_machine_vector *
......
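map_nr_dense() now computes the memory-map index directly as (addr - PAGE_OFFSET) >> PAGE_SHIFT, so an address outside the identity-mapped region wraps to a huge unsigned index (>= max_mapnr), which is exactly what the new comment requires. A compilable model with stand-in constants (not ia64's real PAGE_OFFSET or page size):

/* Userspace model of the dense memory-map index calculation. */
#include <stdio.h>

#define PAGE_SHIFT      14UL                    /* 16KB pages, for illustration */
#define PAGE_OFFSET     0xe000000000000000UL    /* stand-in identity-map base */

static unsigned long map_nr_dense(unsigned long addr)
{
        return (addr - PAGE_OFFSET) >> PAGE_SHIFT;
}

int main(void)
{
        unsigned long addr = PAGE_OFFSET + (123UL << PAGE_SHIFT) + 0x100;

        /* An address below PAGE_OFFSET would wrap to an enormous index. */
        printf("map_nr_dense = %lu\n", map_nr_dense(addr));
        return 0;
}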
...@@ -82,27 +82,27 @@ extern void ia64_slave_init_handler (void); ...@@ -82,27 +82,27 @@ extern void ia64_slave_init_handler (void);
extern struct hw_interrupt_type irq_type_iosapic_level; extern struct hw_interrupt_type irq_type_iosapic_level;
static struct irqaction cmci_irqaction = { static struct irqaction cmci_irqaction = {
handler: ia64_mca_cmc_int_handler, .handler = ia64_mca_cmc_int_handler,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "cmc_hndlr" .name = "cmc_hndlr"
}; };
static struct irqaction mca_rdzv_irqaction = { static struct irqaction mca_rdzv_irqaction = {
handler: ia64_mca_rendez_int_handler, .handler = ia64_mca_rendez_int_handler,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "mca_rdzv" .name = "mca_rdzv"
}; };
static struct irqaction mca_wkup_irqaction = { static struct irqaction mca_wkup_irqaction = {
handler: ia64_mca_wakeup_int_handler, .handler = ia64_mca_wakeup_int_handler,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "mca_wkup" .name = "mca_wkup"
}; };
static struct irqaction mca_cpe_irqaction = { static struct irqaction mca_cpe_irqaction = {
handler: ia64_mca_cpe_int_handler, .handler = ia64_mca_cpe_int_handler,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "cpe_hndlr" .name = "cpe_hndlr"
}; };
/* /*
...@@ -626,9 +626,12 @@ ia64_mca_wakeup_all(void) ...@@ -626,9 +626,12 @@ ia64_mca_wakeup_all(void)
void void
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
{ {
int flags, cpu = 0; unsigned long flags;
int cpu = 0;
/* Mask all interrupts */ /* Mask all interrupts */
save_and_cli(flags); #warning XXX fix me: this used to be: save_and_cli(flags);
local_irq_save(flags);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
cpu = cpu_logical_id(hard_smp_processor_id()); cpu = cpu_logical_id(hard_smp_processor_id());
...@@ -646,7 +649,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) ...@@ -646,7 +649,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
ia64_mca_wakeup_ipi_wait(); ia64_mca_wakeup_ipi_wait();
/* Enable all interrupts */ /* Enable all interrupts */
restore_flags(flags); local_irq_restore(flags);
} }
......
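The irqaction tables in mca.c (and throughout this commit) switch from the old GNU "label:" initializer extension to C99 designated initializers. A self-contained example of the same style, using an invented struct rather than the kernel's struct irqaction:

#include <stdio.h>

struct handler_desc {
        void (*handler)(int);
        unsigned long flags;
        const char *name;
};

static void demo_handler(int irq)
{
        printf("handled irq %d\n", irq);
}

/* C99 form: fields may appear in any order; unmentioned fields stay zero. */
static struct handler_desc cmci_desc = {
        .handler = demo_handler,
        .flags   = 0x1,
        .name    = "cmc_hndlr"
};

int main(void)
{
        printf("%s flags=%#lx\n", cmci_desc.name, cmci_desc.flags);
        cmci_desc.handler(5);
        return 0;
}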
...@@ -684,9 +684,9 @@ ia64_os_mca_tlb_error_check: ...@@ -684,9 +684,9 @@ ia64_os_mca_tlb_error_check:
movl r3=SAL_GET_STATE_INFO;; movl r3=SAL_GET_STATE_INFO;;
DATA_VA_TO_PA(r7);; // convert to physical address DATA_VA_TO_PA(r7);; // convert to physical address
ld8 r8=[r7],8;; // get pdesc function pointer ld8 r8=[r7],8;; // get pdesc function pointer
DATA_VA_TO_PA(r8) // convert to physical address dep r8=0,r8,61,3;; // convert SAL VA to PA
ld8 r1=[r7];; // set new (ia64_sal) gp ld8 r1=[r7];; // set new (ia64_sal) gp
DATA_VA_TO_PA(r1) // convert to physical address dep r1=0,r1,61,3;; // convert SAL VA to PA
mov b6=r8 mov b6=r8
alloc r5=ar.pfs,8,0,8,0;; // allocate stack frame for SAL call alloc r5=ar.pfs,8,0,8,0;; // allocate stack frame for SAL call
......
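The mca_asm.S change converts the SAL function pointer and gp from virtual to physical with "dep rX=0,rX,61,3", i.e. by clearing region bits 63..61 instead of going through DATA_VA_TO_PA. A C model of that masking; the sample address is arbitrary:

#include <stdio.h>

static unsigned long va_to_pa(unsigned long va)
{
        return va & ~(0x7UL << 61);     /* clear bits 63..61 */
}

int main(void)
{
        unsigned long va = 0xe000000004123450UL;

        printf("va %#lx -> pa %#lx\n", va, va_to_pa(va));
        return 0;
}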
...@@ -165,7 +165,7 @@ struct pci_ops pci_sal_ops = { ...@@ -165,7 +165,7 @@ struct pci_ops pci_sal_ops = {
*/ */
struct pci_bus * struct pci_bus *
pcibios_scan_root(int seg, int bus) pcibios_scan_root(int bus)
{ {
struct list_head *list = NULL; struct list_head *list = NULL;
struct pci_bus *pci_bus = NULL; struct pci_bus *pci_bus = NULL;
...@@ -174,12 +174,12 @@ pcibios_scan_root(int seg, int bus) ...@@ -174,12 +174,12 @@ pcibios_scan_root(int seg, int bus)
pci_bus = pci_bus_b(list); pci_bus = pci_bus_b(list);
if (pci_bus->number == bus) { if (pci_bus->number == bus) {
/* Already scanned */ /* Already scanned */
printk("PCI: Bus (%02x:%02x) already probed\n", seg, bus); printk("PCI: Bus (%02x) already probed\n", bus);
return pci_bus; return pci_bus;
} }
} }
printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus); printk("PCI: Probing PCI hardware on bus (%02x)\n", bus);
return pci_scan_bus(bus, pci_root_ops, NULL); return pci_scan_bus(bus, pci_root_ops, NULL);
} }
...@@ -265,12 +265,37 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r ...@@ -265,12 +265,37 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
int int
pcibios_enable_device (struct pci_dev *dev) pcibios_enable_device (struct pci_dev *dev)
{ {
u16 cmd, old_cmd;
int idx;
struct resource *r;
if (!dev) if (!dev)
return -EINVAL; return -EINVAL;
/* Not needed, since we enable all devices at startup. */ pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for (idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
printk(KERN_ERR
"PCI: Device %s not available because of resource collisions\n",
dev->slot_name);
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name); printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
return 0; return 0;
} }
......
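pcibios_enable_device() now walks the first six resources, rejects a device whose resources were never assigned, and ORs the I/O and memory enable bits into the command register. A simplified userspace model of that decision; the constants and the two-BAR device below are stand-ins, not real PCI configuration data:

#include <stdio.h>

#define IORESOURCE_IO           0x100
#define IORESOURCE_MEM          0x200
#define PCI_COMMAND_IO          0x1
#define PCI_COMMAND_MEMORY      0x2

struct resource { unsigned long start, end, flags; };

static int enable_device(struct resource *res, int n, unsigned short *cmd)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!res[i].start && res[i].end)
                        return -1;      /* resource never assigned: refuse */
                if (res[i].flags & IORESOURCE_IO)
                        *cmd |= PCI_COMMAND_IO;
                if (res[i].flags & IORESOURCE_MEM)
                        *cmd |= PCI_COMMAND_MEMORY;
        }
        return 0;
}

int main(void)
{
        struct resource bars[2] = {
                { 0x1000,      0x10ff,      IORESOURCE_IO  },
                { 0xf0000000,  0xf0000fff,  IORESOURCE_MEM },
        };
        unsigned short cmd = 0;

        if (enable_device(bars, 2, &cmd) == 0)
                printf("command word -> %#x\n", cmd);
        return 0;
}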
...@@ -106,6 +106,12 @@ ...@@ -106,6 +106,12 @@
#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0) #define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) (i==0)
#endif
/* /*
* debugging * debugging
*/ */
...@@ -277,8 +283,8 @@ typedef struct { ...@@ -277,8 +283,8 @@ typedef struct {
typedef struct { typedef struct {
pfm_pmu_reg_type_t type; pfm_pmu_reg_type_t type;
int pm_pos; int pm_pos;
int (*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val); int (*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
int (*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val); int (*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
unsigned long dep_pmd[4]; unsigned long dep_pmd[4];
unsigned long dep_pmc[4]; unsigned long dep_pmc[4];
} pfm_reg_desc_t; } pfm_reg_desc_t;
...@@ -396,7 +402,7 @@ static unsigned long reset_pmcs[IA64_NUM_PMC_REGS]; /* contains PAL reset values ...@@ -396,7 +402,7 @@ static unsigned long reset_pmcs[IA64_NUM_PMC_REGS]; /* contains PAL reset values
static void pfm_vm_close(struct vm_area_struct * area); static void pfm_vm_close(struct vm_area_struct * area);
static struct vm_operations_struct pfm_vm_ops={ static struct vm_operations_struct pfm_vm_ops={
close: pfm_vm_close .close = pfm_vm_close
}; };
/* /*
...@@ -902,8 +908,8 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx) ...@@ -902,8 +908,8 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
/* /*
* and it must be a valid CPU * and it must be a valid CPU
*/ */
cpu = ffs(pfx->ctx_cpu_mask); cpu = ffz(~pfx->ctx_cpu_mask);
if (!cpu_online(cpu)) { if (cpu_is_online(cpu) == 0) {
DBprintk(("CPU%d is not online\n", cpu)); DBprintk(("CPU%d is not online\n", cpu));
return -EINVAL; return -EINVAL;
} }
...@@ -925,11 +931,12 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx) ...@@ -925,11 +931,12 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid)); DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
return -EINVAL; return -EINVAL;
} }
#if 0
if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) { if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
DBprintk(("cannot notify self when blocking for [%d]\n", task->pid)); DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
return -EINVAL; return -EINVAL;
} }
#endif
} }
/* probably more to add here */ /* probably more to add here */
...@@ -968,7 +975,7 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int ...@@ -968,7 +975,7 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
if (ctx_flags & PFM_FL_SYSTEM_WIDE) { if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
/* at this point, we know there is at least one bit set */ /* at this point, we know there is at least one bit set */
cpu = ffs(tmp.ctx_cpu_mask) - 1; cpu = ffz(~tmp.ctx_cpu_mask);
DBprintk(("requesting CPU%d currently on CPU%d\n",cpu, smp_processor_id())); DBprintk(("requesting CPU%d currently on CPU%d\n",cpu, smp_processor_id()));
...@@ -1280,7 +1287,7 @@ pfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun ...@@ -1280,7 +1287,7 @@ pfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
/* /*
* execute write checker, if any * execute write checker, if any
*/ */
if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value); if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
abort_mission: abort_mission:
if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL; if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...@@ -1371,7 +1378,7 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun ...@@ -1371,7 +1378,7 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
/* /*
* execute write checker, if any * execute write checker, if any
*/ */
if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value); if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
abort_mission: abort_mission:
if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL; if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...@@ -1394,6 +1401,8 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun ...@@ -1394,6 +1401,8 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
/* keep track of what we use */ /* keep track of what we use */
CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]); CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]);
/* mark this register as used as well */
CTX_USED_PMD(ctx, RDEP(cnum));
/* writes to unimplemented part is ignored, so this is safe */ /* writes to unimplemented part is ignored, so this is safe */
ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val); ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val);
...@@ -1438,7 +1447,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count ...@@ -1438,7 +1447,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid)); DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid));
for (i = 0; i < count; i++, req++) { for (i = 0; i < count; i++, req++) {
unsigned long reg_val = ~0UL, ctx_val = ~0UL; unsigned long ctx_val = ~0UL;
if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT; if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
...@@ -1462,7 +1471,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count ...@@ -1462,7 +1471,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
*/ */
if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()){ if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()){
ia64_srlz_d(); ia64_srlz_d();
val = reg_val = ia64_get_pmd(cnum); val = ia64_get_pmd(cnum);
DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val)); DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val));
} else { } else {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -1484,7 +1493,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count ...@@ -1484,7 +1493,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
} }
#endif #endif
/* context has been saved */ /* context has been saved */
val = reg_val = th->pmd[cnum]; val = th->pmd[cnum];
} }
if (PMD_IS_COUNTING(cnum)) { if (PMD_IS_COUNTING(cnum)) {
/* /*
...@@ -1493,9 +1502,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count ...@@ -1493,9 +1502,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
val &= pmu_conf.perf_ovfl_val; val &= pmu_conf.perf_ovfl_val;
val += ctx_val = ctx->ctx_soft_pmds[cnum].val; val += ctx_val = ctx->ctx_soft_pmds[cnum].val;
} else { }
val = reg_val = ia64_get_pmd(cnum);
}
tmp.reg_value = val; tmp.reg_value = val;
...@@ -1503,14 +1510,13 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count ...@@ -1503,14 +1510,13 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
* execute read checker, if any * execute read checker, if any
*/ */
if (PMD_RD_FUNC(cnum)) { if (PMD_RD_FUNC(cnum)) {
ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value); ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
} }
PFM_REG_RETFLAG_SET(tmp.reg_flags, ret); PFM_REG_RETFLAG_SET(tmp.reg_flags, ret);
DBprintk(("read pmd[%u] ret=%d soft_pmd=0x%lx reg=0x%lx pmc=0x%lx\n", DBprintk(("read pmd[%u] ret=%d value=0x%lx pmc=0x%lx\n",
cnum, ret, ctx_val, reg_val, cnum, ret, val, ia64_get_pmc(cnum)));
ia64_get_pmc(cnum)));
if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT; if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;
} }
...@@ -1553,15 +1559,11 @@ pfm_use_debug_registers(struct task_struct *task) ...@@ -1553,15 +1559,11 @@ pfm_use_debug_registers(struct task_struct *task)
*/ */
if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1; if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
/*
* XXX: not pretty
*/
LOCK_PFS(); LOCK_PFS();
/* /*
* We only allow the use of debug registers when there is no system * We cannot allow setting breakpoints when system wide monitoring
* wide monitoring * sessions are using the debug registers.
* XXX: we could relax this by
*/ */
if (pfm_sessions.pfs_sys_use_dbregs> 0) if (pfm_sessions.pfs_sys_use_dbregs> 0)
ret = -1; ret = -1;
...@@ -1921,7 +1923,6 @@ typedef union { ...@@ -1921,7 +1923,6 @@ typedef union {
dbr_mask_reg_t dbr; dbr_mask_reg_t dbr;
} dbreg_t; } dbreg_t;
static int static int
pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs) pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs)
{ {
...@@ -1963,8 +1964,8 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru ...@@ -1963,8 +1964,8 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
if (ctx->ctx_fl_system) { if (ctx->ctx_fl_system) {
/* we mark ourselves as owner of the debug registers */ /* we mark ourselves as owner of the debug registers */
ctx->ctx_fl_using_dbreg = 1; ctx->ctx_fl_using_dbreg = 1;
} else { DBprintk(("system-wide setting fl_using_dbreg for [%d]\n", task->pid));
if (ctx->ctx_fl_using_dbreg == 0) { } else if (first_time) {
ret= -EBUSY; ret= -EBUSY;
if ((thread->flags & IA64_THREAD_DBG_VALID) != 0) { if ((thread->flags & IA64_THREAD_DBG_VALID) != 0) {
DBprintk(("debug registers already in use for [%d]\n", task->pid)); DBprintk(("debug registers already in use for [%d]\n", task->pid));
...@@ -1973,6 +1974,7 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru ...@@ -1973,6 +1974,7 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
/* we mark ourselves as owner of the debug registers */ /* we mark ourselves as owner of the debug registers */
ctx->ctx_fl_using_dbreg = 1; ctx->ctx_fl_using_dbreg = 1;
DBprintk(("setting fl_using_dbreg for [%d]\n", task->pid));
/* /*
* Given debug registers cannot be used for both debugging * Given debug registers cannot be used for both debugging
* and performance monitoring at the same time, we reuse * and performance monitoring at the same time, we reuse
...@@ -1980,20 +1982,27 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru ...@@ -1980,20 +1982,27 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
*/ */
memset(task->thread.dbr, 0, sizeof(task->thread.dbr)); memset(task->thread.dbr, 0, sizeof(task->thread.dbr));
memset(task->thread.ibr, 0, sizeof(task->thread.ibr)); memset(task->thread.ibr, 0, sizeof(task->thread.ibr));
}
/* if (first_time) {
* clear hardware registers to make sure we don't DBprintk(("[%d] clearing ibrs,dbrs\n", task->pid));
* pick up stale state /*
*/ * clear hardware registers to make sure we don't
for (i=0; i < pmu_conf.num_ibrs; i++) { * pick up stale state.
ia64_set_ibr(i, 0UL); *
} * for a system wide session, we do not use
ia64_srlz_i(); * thread.dbr, thread.ibr because this process
for (i=0; i < pmu_conf.num_dbrs; i++) { * never leaves the current CPU and the state
ia64_set_dbr(i, 0UL); * is shared by all processes running on it
} */
ia64_srlz_d(); for (i=0; i < pmu_conf.num_ibrs; i++) {
ia64_set_ibr(i, 0UL);
} }
ia64_srlz_i();
for (i=0; i < pmu_conf.num_dbrs; i++) {
ia64_set_dbr(i, 0UL);
}
ia64_srlz_d();
} }
ret = -EFAULT; ret = -EFAULT;
...@@ -2361,9 +2370,9 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6, ...@@ -2361,9 +2370,9 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
{ {
struct pt_regs *regs = (struct pt_regs *)&stack; struct pt_regs *regs = (struct pt_regs *)&stack;
struct task_struct *task = current; struct task_struct *task = current;
pfm_context_t *ctx = task->thread.pfm_context; pfm_context_t *ctx;
size_t sz; size_t sz;
int ret = -ESRCH, narg; int ret, narg;
/* /*
* reject any call if perfmon was disabled at initialization time * reject any call if perfmon was disabled at initialization time
...@@ -2393,6 +2402,8 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6, ...@@ -2393,6 +2402,8 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
if (pid != current->pid) { if (pid != current->pid) {
ret = -ESRCH;
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
task = find_task_by_pid(pid); task = find_task_by_pid(pid);
...@@ -2407,10 +2418,11 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6, ...@@ -2407,10 +2418,11 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
ret = check_task_state(task); ret = check_task_state(task);
if (ret != 0) goto abort_call; if (ret != 0) goto abort_call;
} }
ctx = task->thread.pfm_context;
} }
} }
ctx = task->thread.pfm_context;
if (PFM_CMD_USE_CTX(cmd)) { if (PFM_CMD_USE_CTX(cmd)) {
ret = -EINVAL; ret = -EINVAL;
if (ctx == NULL) { if (ctx == NULL) {
...@@ -2953,11 +2965,6 @@ perfmon_interrupt (int irq, void *arg, struct pt_regs *regs) ...@@ -2953,11 +2965,6 @@ perfmon_interrupt (int irq, void *arg, struct pt_regs *regs)
static int static int
perfmon_proc_info(char *page) perfmon_proc_info(char *page)
{ {
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) 1
#endif
char *p = page; char *p = page;
int i; int i;
...@@ -4118,9 +4125,9 @@ pfm_cleanup_notifiers(struct task_struct *task) ...@@ -4118,9 +4125,9 @@ pfm_cleanup_notifiers(struct task_struct *task)
} }
static struct irqaction perfmon_irqaction = { static struct irqaction perfmon_irqaction = {
handler: perfmon_interrupt, .handler = perfmon_interrupt,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "perfmon" .name = "perfmon"
}; };
...@@ -4150,11 +4157,6 @@ perfmon_init (void) ...@@ -4150,11 +4157,6 @@ perfmon_init (void)
pal_perf_mon_info_u_t pm_info; pal_perf_mon_info_u_t pm_info;
s64 status; s64 status;
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ia64_set_pmv(IA64_PERFMON_VECTOR);
ia64_srlz_d();
pmu_conf.pfm_is_disabled = 1; pmu_conf.pfm_is_disabled = 1;
printk("perfmon: version %u.%u (sampling format v%u.%u) IRQ %u\n", printk("perfmon: version %u.%u (sampling format v%u.%u) IRQ %u\n",
...@@ -4232,6 +4234,9 @@ __initcall(perfmon_init); ...@@ -4232,6 +4234,9 @@ __initcall(perfmon_init);
void void
perfmon_init_percpu (void) perfmon_init_percpu (void)
{ {
if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ia64_set_pmv(IA64_PERFMON_VECTOR); ia64_set_pmv(IA64_PERFMON_VECTOR);
ia64_srlz_d(); ia64_srlz_d();
} }
......
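Several perfmon.c hunks replace the ffs()-based code with ffz(~mask), which yields the index of the lowest set bit (the caller has already checked that at least one bit is set). A quick userspace check of that identity with a toy ffz(); the masks are arbitrary examples:

#include <stdio.h>

/* find first zero bit, like the kernel's ffz(); caller must not pass ~0 */
static int ffz(unsigned long x)
{
        int n = 0;

        while (x & 1) {
                x >>= 1;
                n++;
        }
        return n;
}

int main(void)
{
        unsigned long masks[] = { 0x1, 0x8, 0x50, 0xf00 };
        int i;

        for (i = 0; i < 4; i++)
                printf("mask %#lx -> lowest set bit %d\n",
                       masks[i], ffz(~masks[i]));
        return 0;
}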
/*
* This file contains the Itanium PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_ITANIUM
#error "This file is only valid when CONFIG_ITANIUM is defined"
#endif
static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pmc_desc[256]={
/* pmc0 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR, 6, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG, 0, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[256]={
/* pmd0 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static int
pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
pfm_context_t *ctx = task->thread.pfm_context;
int ret;
/*
* we must clear the (instruction) debug registers if pmc13.ta bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 13 && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
if (ret) return ret;
}
/*
* we must clear the (data) debug registers if pmc11.pt bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 11 && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
if (ret) return ret;
}
return 0;
}
/*
* This file contains the McKinley PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_MCKINLEY
#error "This file is only valid when CONFIG_MCKINLEY is defined"
#endif
static int pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pmc_desc[256]={
/* pmc0 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG, 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR, 4, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG, 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_CONFIG, 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[256]={
/* pmd0 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static int
pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
struct thread_struct *th = &task->thread;
pfm_context_t *ctx = task->thread.pfm_context;
int ret = 0, check_case1 = 0;
unsigned long val8 = 0, val14 = 0, val13 = 0;
/*
* we must clear the debug registers if any pmc13.ena_dbrpX bit is enabled
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 13 && (*val & (0xfUL << 45)) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
if (ret) return ret;
}
/*
* we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 14 && ((*val & 0x2222) != 0x2222) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
if (ret) return ret;
}
switch(cnum) {
case 4: *val |= 1UL << 23; /* force power enable bit */
break;
case 8: val8 = *val;
val13 = th->pmc[13];
val14 = th->pmc[14];
check_case1 = 1;
break;
case 13: val8 = th->pmc[8];
val13 = *val;
val14 = th->pmc[14];
check_case1 = 1;
break;
case 14: val8 = th->pmc[8];
val13 = th->pmc[13];
val14 = *val;
check_case1 = 1;
break;
}
/* check illegal configuration which can produce inconsistencies in tagging
* i-side events in L1D and L2 caches
*/
if (check_case1) {
ret = ((val13 >> 45) & 0xf) == 0
&& ((val8 & 0x1) == 0)
&& ((((val14>>1) & 0x3) == 0x2 || ((val14>>1) & 0x3) == 0x0)
||(((val14>>4) & 0x3) == 0x2 || ((val14>>4) & 0x3) == 0x0));
if (ret) printk("perfmon: failure check_case1\n");
}
return ret ? -EINVAL : 0;
}
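The McKinley checker's check_case1 test rejects pmc8/pmc13/pmc14 combinations that would make i-side event tagging in the L1D and L2 caches inconsistent. The helper below restates that boolean expression on its own so it can be exercised directly; the sample register values are invented:

#include <stdio.h>

static int check_case1(unsigned long val8, unsigned long val13,
                       unsigned long val14)
{
        return ((val13 >> 45) & 0xf) == 0
            && ((val8 & 0x1) == 0)
            && ((((val14 >> 1) & 0x3) == 0x2 || ((val14 >> 1) & 0x3) == 0x0)
                || (((val14 >> 4) & 0x3) == 0x2 || ((val14 >> 4) & 0x3) == 0x0));
}

int main(void)
{
        /* an example combination the checker would reject with -EINVAL */
        printf("illegal? %d\n", check_case1(0x0, 0x0, 0x4));
        return 0;
}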
...@@ -325,6 +325,11 @@ copy_thread (int nr, unsigned long clone_flags, ...@@ -325,6 +325,11 @@ copy_thread (int nr, unsigned long clone_flags,
/* copy parts of thread_struct: */ /* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16; p->thread.ksp = (unsigned long) child_stack - 16;
/* stop some PSR bits from being inherited: */
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~IA64_PSR_BITS_TO_CLEAR);
/* /*
* NOTE: The calling convention considers all floating point * NOTE: The calling convention considers all floating point
* registers in the high partition (fph) to be scratch. Since * registers in the high partition (fph) to be scratch. Since
......
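copy_thread() now sanitizes the child's cr_ipsr with a set-mask/clear-mask pair. A generic model of that idiom; the two masks below are placeholders, not the real IA64_PSR_BITS_TO_SET and IA64_PSR_BITS_TO_CLEAR values:

#include <stdio.h>

#define BITS_TO_SET     0x0000000000002000UL    /* e.g. force an enable bit */
#define BITS_TO_CLEAR   0x00000000c0000000UL    /* e.g. strip privileged bits */

static unsigned long sanitize(unsigned long psr)
{
        return (psr | BITS_TO_SET) & ~BITS_TO_CLEAR;
}

int main(void)
{
        unsigned long parent_psr = 0x00000000c0000001UL;

        printf("parent %#lx -> child %#lx\n", parent_psr, sanitize(parent_psr));
        return 0;
}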
...@@ -347,6 +347,14 @@ setup_arch (char **cmdline_p) ...@@ -347,6 +347,14 @@ setup_arch (char **cmdline_p)
#ifdef CONFIG_ACPI_BOOT #ifdef CONFIG_ACPI_BOOT
acpi_boot_init(*cmdline_p); acpi_boot_init(*cmdline_p);
#endif #endif
#ifdef CONFIG_SERIAL_HCDP
if (efi.hcdp) {
void setup_serial_hcdp(void *);
/* Setup the serial ports described by HCDP */
setup_serial_hcdp(efi.hcdp);
}
#endif
#ifdef CONFIG_VT #ifdef CONFIG_VT
# if defined(CONFIG_DUMMY_CONSOLE) # if defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con; conswitchp = &dummy_con;
...@@ -436,7 +444,7 @@ static void * ...@@ -436,7 +444,7 @@ static void *
c_start (struct seq_file *m, loff_t *pos) c_start (struct seq_file *m, loff_t *pos)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
while (*pos < NR_CPUS && !(cpu_online_map & (1 << *pos))) while (*pos < NR_CPUS && !(cpu_online_map & (1UL << *pos)))
++*pos; ++*pos;
#endif #endif
return *pos < NR_CPUS ? cpu_data(*pos) : NULL; return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
...@@ -455,10 +463,10 @@ c_stop (struct seq_file *m, void *v) ...@@ -455,10 +463,10 @@ c_stop (struct seq_file *m, void *v)
} }
struct seq_operations cpuinfo_op = { struct seq_operations cpuinfo_op = {
start: c_start, .start = c_start,
next: c_next, .next = c_next,
stop: c_stop, .stop = c_stop,
show: show_cpuinfo .show = show_cpuinfo
}; };
void void
...@@ -542,7 +550,18 @@ cpu_init (void) ...@@ -542,7 +550,18 @@ cpu_init (void)
extern char __per_cpu_end[]; extern char __per_cpu_end[];
int cpu = smp_processor_id(); int cpu = smp_processor_id();
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start); if (__per_cpu_end - __per_cpu_start > PAGE_SIZE)
panic("Per-cpu data area too big! (%Zu > %Zu)",
__per_cpu_end - __per_cpu_start, PAGE_SIZE);
/*
* On the BSP, the page allocator isn't initialized by the time we get here. On
* the APs, the bootmem allocator is no longer available...
*/
if (cpu == 0)
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
else
my_cpu_data = (void *) get_free_page(GFP_KERNEL);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start); memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start; __per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start); my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
......
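The c_start() fix changes "1 << *pos" to "1UL << *pos": with a 64-bit cpu_online_map, shifting a plain int by 32 or more is undefined and the online test breaks for CPUs numbered 32 and up. A small demonstration of the corrected form:

#include <stdio.h>

int main(void)
{
        unsigned long cpu_online_map = 1UL << 40;       /* only CPU 40 online */
        int cpu = 40;

        /* correct: the shift is done in 64-bit arithmetic */
        printf("1UL << cpu : online=%d\n",
               (cpu_online_map & (1UL << cpu)) != 0);

        /* wrong on LP64: "1 << 40" shifts an int and is undefined behaviour */
        /* (cpu_online_map & (1 << cpu)) */
        return 0;
}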
...@@ -146,6 +146,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) ...@@ -146,6 +146,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
if (from->si_code < 0) { if (from->si_code < 0) {
if (__copy_to_user(to, from, sizeof(siginfo_t))) if (__copy_to_user(to, from, sizeof(siginfo_t)))
return -EFAULT; return -EFAULT;
return 0;
} else { } else {
int err; int err;
......
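The copy_siginfo_to_user() hunk adds the missing "return 0;" after the raw copy, so the si_code < 0 path no longer falls through into the field-by-field path. A minimal model of the corrected control flow, with the user-copy stubbed out as memcpy() and an invented struct:

#include <string.h>
#include <stdio.h>

struct info { int si_code; int payload; };

static int copy_info(struct info *to, const struct info *from)
{
        if (from->si_code < 0) {
                memcpy(to, from, sizeof(*to));  /* raw copy of the whole struct */
                return 0;                       /* the fix: stop here */
        }
        /* otherwise copy only the fields valid for this si_code */
        to->si_code = from->si_code;
        to->payload = from->payload;
        return 0;
}

int main(void)
{
        struct info src = { .si_code = -1, .payload = 42 }, dst;

        printf("copy rc=%d payload=%d\n", copy_info(&dst, &src), dst.payload);
        return 0;
}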
/* /*
* SMP boot-related support * SMP boot-related support
* *
* Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* *
* 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here. * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here.
* 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code. * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code.
* 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
* smp_boot_cpus()/smp_commence() is replaced by
* smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
*/ */
...@@ -66,18 +70,16 @@ static volatile unsigned long go[SLAVE + 1]; ...@@ -66,18 +70,16 @@ static volatile unsigned long go[SLAVE + 1];
#define DEBUG_ITC_SYNC 0 #define DEBUG_ITC_SYNC 0
extern void __init calibrate_delay(void); extern void __init calibrate_delay (void);
extern void start_ap(void); extern void start_ap (void);
extern unsigned long ia64_iobase; extern unsigned long ia64_iobase;
int cpucount; int cpucount;
task_t *task_for_booting_cpu; task_t *task_for_booting_cpu;
/* Setup configured maximum number of CPUs to activate */
static int max_cpus = -1;
/* Bitmask of currently online CPUs */ /* Bitmask of currently online CPUs */
volatile unsigned long cpu_online_map; volatile unsigned long cpu_online_map;
unsigned long phys_cpu_present_map;
/* which logical CPU number maps to which CPU (physical APIC ID) */ /* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS]; volatile int ia64_cpu_to_sapicid[NR_CPUS];
...@@ -86,44 +88,12 @@ static volatile unsigned long cpu_callin_map; ...@@ -86,44 +88,12 @@ static volatile unsigned long cpu_callin_map;
struct smp_boot_data smp_boot_data __initdata; struct smp_boot_data smp_boot_data __initdata;
/* Set when the idlers are all forked */
volatile int smp_threads_ready;
unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */ unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */
char __initdata no_int_routing; char __initdata no_int_routing;
unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */ unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
/*
* Setup routine for controlling SMP activation
*
* Command-line option of "nosmp" or "maxcpus=0" will disable SMP
* activation entirely (the MPS table probe still happens, though).
*
* Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
* greater than 0, limits the maximum number of CPUs activated in
* SMP mode to <NUM>.
*/
static int __init
nosmp (char *str)
{
max_cpus = 0;
return 1;
}
__setup("nosmp", nosmp);
static int __init
maxcpus (char *str)
{
get_option(&str, &max_cpus);
return 1;
}
__setup("maxcpus=", maxcpus);
static int __init static int __init
nointroute (char *str) nointroute (char *str)
{ {
...@@ -299,7 +269,7 @@ smp_setup_percpu_timer (void) ...@@ -299,7 +269,7 @@ smp_setup_percpu_timer (void)
static volatile atomic_t smp_commenced = ATOMIC_INIT(0); static volatile atomic_t smp_commenced = ATOMIC_INIT(0);
void __init static void __init
smp_commence (void) smp_commence (void)
{ {
/* /*
...@@ -308,7 +278,7 @@ smp_commence (void) ...@@ -308,7 +278,7 @@ smp_commence (void)
Dprintk("Setting commenced=1, go go go\n"); Dprintk("Setting commenced=1, go go go\n");
wmb(); wmb();
atomic_set(&smp_commenced,1); atomic_set(&smp_commenced, 1);
} }
...@@ -380,11 +350,7 @@ start_secondary (void *unused) ...@@ -380,11 +350,7 @@ start_secondary (void *unused)
efi_map_pal_code(); efi_map_pal_code();
cpu_init(); cpu_init();
smp_callin(); smp_callin();
Dprintk("CPU %d is set to go.\n", smp_processor_id());
while (!atomic_read(&smp_commenced))
cpu_relax();
Dprintk("CPU %d is starting idle.\n", smp_processor_id());
return cpu_idle(); return cpu_idle();
} }
...@@ -392,22 +358,20 @@ static struct task_struct * __init ...@@ -392,22 +358,20 @@ static struct task_struct * __init
fork_by_hand (void) fork_by_hand (void)
{ {
/* /*
* don't care about the eip and regs settings since * don't care about the eip and regs settings since we'll never reschedule the
* we'll never reschedule the forked task. * forked task.
*/ */
return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0); return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0);
} }
static void __init static int __init
do_boot_cpu (int sapicid) do_boot_cpu (int sapicid, int cpu)
{ {
struct task_struct *idle; struct task_struct *idle;
int timeout, cpu; int timeout;
cpu = ++cpucount;
/* /*
* We can't use kernel_thread since we must avoid to * We can't use kernel_thread since we must avoid to reschedule the child.
* reschedule the child.
*/ */
idle = fork_by_hand(); idle = fork_by_hand();
if (IS_ERR(idle)) if (IS_ERR(idle))
...@@ -419,13 +383,11 @@ do_boot_cpu (int sapicid) ...@@ -419,13 +383,11 @@ do_boot_cpu (int sapicid)
*/ */
init_idle(idle, cpu); init_idle(idle, cpu);
ia64_cpu_to_sapicid[cpu] = sapicid;
unhash_process(idle); unhash_process(idle);
task_for_booting_cpu = idle; task_for_booting_cpu = idle;
Dprintk("Sending wakeup vector %u to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
...@@ -448,8 +410,10 @@ do_boot_cpu (int sapicid) ...@@ -448,8 +410,10 @@ do_boot_cpu (int sapicid)
} else { } else {
printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
ia64_cpu_to_sapicid[cpu] = -1; ia64_cpu_to_sapicid[cpu] = -1;
cpucount--; clear_bit(cpu, &cpu_online_map); /* was set in smp_callin() */
return -EINVAL;
} }
return 0;
} }
unsigned long cache_decay_ticks; /* # of ticks an idle task is considered cache-hot */ unsigned long cache_decay_ticks; /* # of ticks an idle task is considered cache-hot */
...@@ -463,22 +427,43 @@ smp_tune_scheduling (void) ...@@ -463,22 +427,43 @@ smp_tune_scheduling (void)
(cache_decay_ticks + 1) * 1000 / HZ); (cache_decay_ticks + 1) * 1000 / HZ);
} }
/*
* Initialize the logical CPU number to SAPICID mapping
*/
void __init
smp_build_cpu_map (void)
{
int sapicid, cpu, i;
int boot_cpu_id = hard_smp_processor_id();
for (cpu = 0; cpu < NR_CPUS; cpu++)
ia64_cpu_to_sapicid[cpu] = -1;
ia64_cpu_to_sapicid[0] = boot_cpu_id;
phys_cpu_present_map = 1;
for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
sapicid = smp_boot_data.cpu_phys_id[i];
if (sapicid == -1 || sapicid == boot_cpu_id)
continue;
phys_cpu_present_map |= (1 << cpu);
ia64_cpu_to_sapicid[cpu] = sapicid;
cpu++;
}
}
/* /*
* Cycle through the APs sending Wakeup IPIs to boot each. * Cycle through the APs sending Wakeup IPIs to boot each.
*/ */
void __init void __init
smp_boot_cpus (void) smp_prepare_cpus (unsigned int max_cpus)
{ {
int sapicid, cpu;
int boot_cpu_id = hard_smp_processor_id(); int boot_cpu_id = hard_smp_processor_id();
/* /*
* Initialize the logical to physical CPU number mapping * Initialize the per-CPU profiling counter/multiplier
* and the per-CPU profiling counter/multiplier
*/ */
for (cpu = 0; cpu < NR_CPUS; cpu++)
ia64_cpu_to_sapicid[cpu] = -1;
smp_setup_percpu_timer(); smp_setup_percpu_timer();
/* /*
...@@ -492,7 +477,6 @@ smp_boot_cpus (void) ...@@ -492,7 +477,6 @@ smp_boot_cpus (void)
printk("Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id); printk("Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
global_irq_holder = NO_PROC_ID;
current_thread_info()->cpu = 0; current_thread_info()->cpu = 0;
smp_tune_scheduling(); smp_tune_scheduling();
...@@ -501,55 +485,48 @@ smp_boot_cpus (void) ...@@ -501,55 +485,48 @@ smp_boot_cpus (void)
*/ */
if (!max_cpus || (max_cpus < -1)) { if (!max_cpus || (max_cpus < -1)) {
printk(KERN_INFO "SMP mode deactivated.\n"); printk(KERN_INFO "SMP mode deactivated.\n");
cpu_online_map = 1; cpu_online_map = phys_cpu_present_map = 1;
goto smp_done; return;
} }
if (max_cpus != -1) }
printk (KERN_INFO "Limiting CPUs to %d\n", max_cpus);
if (smp_boot_data.cpu_count > 1) { void
smp_cpus_done (unsigned int dummy)
{
int cpu;
unsigned long bogosum = 0;
printk(KERN_INFO "SMP: starting up secondaries.\n"); /*
* Allow the user to impress friends.
*/
for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) { for (cpu = 0; cpu < NR_CPUS; cpu++)
/* if (cpu_online(cpu))
* Don't even attempt to start the boot CPU! bogosum += cpu_data(cpu)->loops_per_jiffy;
*/
sapicid = smp_boot_data.cpu_phys_id[cpu];
if ((sapicid == -1) || (sapicid == hard_smp_processor_id()))
continue;
if ((max_cpus > 0) && (cpucount + 1 >= max_cpus)) printk(KERN_INFO"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
break; num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
do_boot_cpu(sapicid); int __devinit
__cpu_up (unsigned int cpu)
{
int ret;
int sapicid;
/* sapicid = ia64_cpu_to_sapicid[cpu];
* Make sure we unmap all failed CPUs if (sapicid == -1)
*/ return -EINVAL;
if (ia64_cpu_to_sapicid[cpu] == -1)
printk("phys CPU#%d not responding - cannot use it.\n", cpu);
}
/* printk(KERN_INFO "Processor %d/%d is spinning up...\n", sapicid, cpu);
* Allow the user to impress friends.
*/ /* Processor goes to start_secondary(), sets online flag */
ret = do_boot_cpu(sapicid, cpu);
printk("Before bogomips.\n"); if (ret < 0)
if (!cpucount) { return ret;
printk(KERN_ERR "Error: only one processor found.\n");
} else { printk(KERN_INFO "Processor %d has spun up...\n", cpu);
unsigned long bogosum = 0; return 0;
for (cpu = 0; cpu < NR_CPUS; cpu++)
if (cpu_online_map & (1<<cpu))
bogosum += cpu_data(cpu)->loops_per_jiffy;
printk(KERN_INFO"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
}
smp_done:
;
} }
/* /*
...@@ -571,9 +548,6 @@ init_smp_config(void) ...@@ -571,9 +548,6 @@ init_smp_config(void)
ap_startup = (struct fptr *) start_ap; ap_startup = (struct fptr *) start_ap;
sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ, sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
__pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0); __pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0);
if (sal_ret < 0) { if (sal_ret < 0)
printk("SMP: Can't set SAL AP Boot Rendezvous: %s\n Forcing UP mode\n", printk("SMP: Can't set SAL AP Boot Rendezvous: %s\n", ia64_sal_strerror(sal_ret));
ia64_sal_strerror(sal_ret));
max_cpus = 0;
}
} }
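smpboot.c moves to the hotplug-style sequence (smp_prepare_cpus/__cpu_up/smp_cpus_done) and adds smp_build_cpu_map(), which assigns logical CPU 0 to the boot CPU and the remaining firmware SAPIC ids to successive slots while building phys_cpu_present_map. A compact userspace model of that mapping step; the firmware id list is invented:

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
        int boot_cpu_id = 0x10;
        int firmware_ids[] = { 0x10, 0x12, -1, 0x14 }; /* sample SAPIC ids */
        int cpu_to_sapicid[NR_CPUS];
        unsigned long present_map = 1;                 /* boot CPU is present */
        int cpu = 1, i;

        for (i = 0; i < NR_CPUS; i++)
                cpu_to_sapicid[i] = -1;
        cpu_to_sapicid[0] = boot_cpu_id;

        for (i = 0; i < 4; i++) {
                int sapicid = firmware_ids[i];

                if (sapicid == -1 || sapicid == boot_cpu_id)
                        continue;
                present_map |= 1UL << cpu;
                cpu_to_sapicid[cpu++] = sapicid;
        }

        printf("present map %#lx\n", present_map);
        for (i = 0; i < cpu; i++)
                printf("logical %d -> sapicid 0x%x\n", i, cpu_to_sapicid[i]);
        return 0;
}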
...@@ -82,7 +82,6 @@ asmlinkage unsigned long ...@@ -82,7 +82,6 @@ asmlinkage unsigned long
ia64_shmat (int shmid, void *shmaddr, int shmflg, long arg3, long arg4, long arg5, long arg6, ia64_shmat (int shmid, void *shmaddr, int shmflg, long arg3, long arg4, long arg5, long arg6,
long arg7, long stack) long arg7, long stack)
{ {
extern int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr);
struct pt_regs *regs = (struct pt_regs *) &stack; struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long raddr; unsigned long raddr;
int retval; int retval;
...@@ -138,10 +137,6 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3, ...@@ -138,10 +137,6 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
goto out; goto out;
/* Check if we have enough memory.. */
if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
goto out;
/* Ok, looks good - let it rip. */ /* Ok, looks good - let it rip. */
if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
goto out; goto out;
......
...@@ -41,21 +41,22 @@ do_profile (unsigned long ip) ...@@ -41,21 +41,22 @@ do_profile (unsigned long ip)
extern unsigned long prof_cpu_mask; extern unsigned long prof_cpu_mask;
extern char _stext; extern char _stext;
if (!prof_buffer)
return;
if (!((1UL << smp_processor_id()) & prof_cpu_mask)) if (!((1UL << smp_processor_id()) & prof_cpu_mask))
return; return;
if (prof_buffer && current->pid) { ip -= (unsigned long) &_stext;
ip -= (unsigned long) &_stext; ip >>= prof_shift;
ip >>= prof_shift; /*
/* * Don't ignore out-of-bounds IP values silently, put them into the last
* Don't ignore out-of-bounds IP values silently, put them into the last * histogram slot, so if present, they will show up as a sharp peak.
* histogram slot, so if present, they will show up as a sharp peak. */
*/ if (ip > prof_len - 1)
if (ip > prof_len - 1) ip = prof_len - 1;
ip = prof_len - 1;
atomic_inc((atomic_t *) &prof_buffer[ip]); atomic_inc((atomic_t *) &prof_buffer[ip]);
}
} }
/* /*
...@@ -285,9 +286,9 @@ ia64_init_itm (void) ...@@ -285,9 +286,9 @@ ia64_init_itm (void)
} }
static struct irqaction timer_irqaction = { static struct irqaction timer_irqaction = {
handler: timer_interrupt, .handler = timer_interrupt,
flags: SA_INTERRUPT, .flags = SA_INTERRUPT,
name: "timer" .name = "timer"
}; };
void __init void __init
......
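The reworked do_profile() bails out early when there is no profile buffer, then clamps out-of-range instruction pointers into the last histogram slot so they show up as a visible spike instead of being dropped. A userspace model of the indexing; buffer size, shift and addresses are made up:

#include <stdio.h>

#define PROF_SHIFT      4
#define PROF_LEN        64

static unsigned int prof_buffer[PROF_LEN];

static void do_profile(unsigned long ip, unsigned long text_start)
{
        ip -= text_start;
        ip >>= PROF_SHIFT;
        if (ip > PROF_LEN - 1)
                ip = PROF_LEN - 1;      /* don't drop out-of-bounds hits */
        prof_buffer[ip]++;
}

int main(void)
{
        do_profile(0x4000001000UL + 0x20, 0x4000001000UL);
        do_profile(0x4000009999UL, 0x4000001000UL);     /* lands in last slot */

        printf("slot 2 = %u, last slot = %u\n",
               prof_buffer[2], prof_buffer[PROF_LEN - 1]);
        return 0;
}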
...@@ -49,10 +49,15 @@ static fpswa_interface_t *fpswa_interface; ...@@ -49,10 +49,15 @@ static fpswa_interface_t *fpswa_interface;
void __init void __init
trap_init (void) trap_init (void)
{ {
printk("fpswa interface at %lx\n", ia64_boot_param->fpswa); int major = 0, minor = 0;
if (ia64_boot_param->fpswa)
if (ia64_boot_param->fpswa) {
/* FPSWA fixup: make the interface pointer a kernel virtual address: */ /* FPSWA fixup: make the interface pointer a kernel virtual address: */
fpswa_interface = __va(ia64_boot_param->fpswa); fpswa_interface = __va(ia64_boot_param->fpswa);
major = fpswa_interface->revision & 0xffff;
minor = fpswa_interface->revision >> 16;
}
printk("fpswa interface at %lx (rev %d.%d)\n", ia64_boot_param->fpswa, major, minor);
} }
/* /*
...@@ -62,27 +67,26 @@ trap_init (void) ...@@ -62,27 +67,26 @@ trap_init (void)
void void
bust_spinlocks (int yes) bust_spinlocks (int yes)
{ {
int loglevel_save = console_loglevel;
spin_lock_init(&timerlist_lock); spin_lock_init(&timerlist_lock);
if (yes) { if (yes) {
oops_in_progress = 1; oops_in_progress = 1;
#ifdef CONFIG_SMP return;
global_irq_lock = 0; /* Many serial drivers do __global_cli() */ }
#endif
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT #ifdef CONFIG_VT
unblank_screen(); unblank_screen();
#endif #endif
oops_in_progress = 0; oops_in_progress = 0;
/* /*
* OK, the message is on the console. Now we call printk() without * OK, the message is on the console. Now we call printk() without
* oops_in_progress set so that printk will give klogd a poke. Hold onto * oops_in_progress set so that printk will give klogd a poke. Hold onto
* your hats... * your hats...
*/ */
console_loglevel = 15; /* NMI oopser may have shut the console up */ console_loglevel = 15; /* NMI oopser may have shut the console up */
printk(" "); printk(" ");
console_loglevel = loglevel_save; console_loglevel = loglevel_save;
}
} }
void void
...@@ -93,9 +97,9 @@ die (const char *str, struct pt_regs *regs, long err) ...@@ -93,9 +97,9 @@ die (const char *str, struct pt_regs *regs, long err)
int lock_owner; int lock_owner;
int lock_owner_depth; int lock_owner_depth;
} die = { } die = {
lock: SPIN_LOCK_UNLOCKED, .lock = SPIN_LOCK_UNLOCKED,
lock_owner: -1, .lock_owner = -1,
lock_owner_depth: 0 .lock_owner_depth = 0
}; };
if (die.lock_owner != smp_processor_id()) { if (die.lock_owner != smp_processor_id()) {
...@@ -131,6 +135,8 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs) ...@@ -131,6 +135,8 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo_t siginfo; siginfo_t siginfo;
int sig, code; int sig, code;
die_if_kernel("bad break", regs, break_num);
/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */ /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri); siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = break_num; siginfo.si_imm = break_num;
...@@ -247,10 +253,9 @@ disabled_fph_fault (struct pt_regs *regs) ...@@ -247,10 +253,9 @@ disabled_fph_fault (struct pt_regs *regs)
if (fpu_owner) if (fpu_owner)
ia64_flush_fph(fpu_owner); ia64_flush_fph(fpu_owner);
ia64_set_fpu_owner(current);
} }
#endif /* !CONFIG_SMP */ #endif /* !CONFIG_SMP */
ia64_set_fpu_owner(current);
if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) { if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
__ia64_load_fpu(current->thread.fph); __ia64_load_fpu(current->thread.fph);
psr->mfh = 0; psr->mfh = 0;
...@@ -435,7 +440,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -435,7 +440,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
unsigned long code, error = isr; unsigned long code, error = isr;
struct siginfo siginfo; struct siginfo siginfo;
char buf[128]; char buf[128];
int result; int result, sig;
static const char *reason[] = { static const char *reason[] = {
"IA-64 Illegal Operation fault", "IA-64 Illegal Operation fault",
"IA-64 Privileged Operation fault", "IA-64 Privileged Operation fault",
...@@ -479,6 +484,30 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -479,6 +484,30 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
break; break;
case 26: /* NaT Consumption */ case 26: /* NaT Consumption */
if (user_mode(regs)) {
if (((isr >> 4) & 0xf) == 2) {
/* NaT page consumption */
sig = SIGSEGV;
code = SEGV_ACCERR;
} else {
/* register NaT consumption */
sig = SIGILL;
code = ILL_ILLOPN;
}
siginfo.si_signo = sig;
siginfo.si_code = code;
siginfo.si_errno = 0;
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = vector;
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
force_sig_info(sig, &siginfo, current);
return;
} else if (done_with_exception(regs))
return;
sprintf(buf, "NaT consumption");
break;
case 31: /* Unsupported Data Reference */ case 31: /* Unsupported Data Reference */
if (user_mode(regs)) { if (user_mode(regs)) {
siginfo.si_signo = SIGILL; siginfo.si_signo = SIGILL;
@@ -491,7 +520,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
force_sig_info(SIGILL, &siginfo, current);
return;
}
- sprintf(buf, (vector == 26) ? "NaT consumption" : "Unsupported data reference");
+ sprintf(buf, "Unsupported data reference");
break;
case 29: /* Debug */
@@ -508,16 +537,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
if (ia64_psr(regs)->is == 0)
ifa = regs->cr_iip;
#endif
- siginfo.si_addr = (void *) ifa;
break;
- case 35: siginfo.si_code = TRAP_BRANCH; break;
- case 36: siginfo.si_code = TRAP_TRACE; break;
+ case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
+ case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
}
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
siginfo.si_flags = 0;
siginfo.si_isr = 0;
- siginfo.si_addr = 0;
+ siginfo.si_addr = (void *) ifa;
siginfo.si_imm = 0;
force_sig_info(SIGTRAP, &siginfo, current);
return;
...
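Note on the debug-fault hunk above: instead of always reporting si_addr = 0, the patched code passes the fault address (ifa) through siginfo for SIGTRAP, clearing it only for the taken-branch and single-step cases. A hedged userspace sketch of how a handler would read that field; this is plain POSIX sigaction, nothing ia64-specific, and the self-raised SIGTRAP at the end is only there so the program does something when run:

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void trap_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* For hardware debug traps, si_addr now carries the fault address.
	 * (printf is not async-signal-safe; acceptable for a throwaway demo.) */
	printf("SIGTRAP: si_code=%d si_addr=%p\n", info->si_code, info->si_addr);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = trap_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGTRAP, &sa, NULL);

	raise(SIGTRAP);	/* a self-raised signal has no meaningful si_addr */
	return 0;
}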
@@ -140,13 +140,13 @@ static struct {
} stat;
# endif
} unw = {
- tables: &unw.kernel_table,
- lock: SPIN_LOCK_UNLOCKED,
- save_order: {
+ .tables = &unw.kernel_table,
+ .lock = SPIN_LOCK_UNLOCKED,
+ .save_order = {
UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
},
- preg_index: {
+ .preg_index = {
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
struct_offset(struct unw_frame_info, bsp_loc)/8,
@@ -189,9 +189,9 @@ static struct {
struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
},
- hash : { [0 ... UNW_HASH_SIZE - 1] = -1 },
+ .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
#if UNW_DEBUG
- preg_name: {
+ .preg_name = {
"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
"r4", "r5", "r6", "r7",
"ar.unat", "pr", "ar.lc", "ar.fpsr",
@@ -634,8 +634,8 @@ alloc_spill_area (unsigned long *offp, unsigned long regsize,
for (reg = hi; reg >= lo; --reg) {
if (reg->where == UNW_WHERE_SPILL_HOME) {
reg->where = UNW_WHERE_PSPREL;
- reg->val = 0x10 - *offp;
- *offp += regsize;
+ *offp -= regsize;
+ reg->val = *offp;
}
}
}
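Aside on the alloc_spill_area() hunk above: the corrected loop reserves space first (*offp -= regsize) and only then records the new offset as the register's spill home, so each spilled register gets its own slot below the previous one. A toy sketch of that reserve-then-record pattern (illustration only, not kernel code; the names and the starting offset are made up):

#include <stdio.h>

/* Shrink the running offset by the register size, then hand back the new
 * offset as this register's spill location, mirroring the fixed order of
 * operations in alloc_spill_area(). */
static long reserve_slot(long *off, long regsize)
{
	*off -= regsize;
	return *off;
}

int main(void)
{
	long off = 0;	/* arbitrary starting point for the illustration */
	int reg;

	for (reg = 0; reg < 4; ++reg)
		printf("spill home %d -> offset %ld\n", reg, reserve_slot(&off, 8));
	return 0;
}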
@@ -814,7 +814,8 @@ desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *s
}
for (i = 0; i < 20; ++i) {
if ((frmask & 1) != 0) {
- set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
+ int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
+ set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
sr->region_start + sr->region_len - 1, 0);
sr->any_spills = 1;
}
...
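Aside on the desc_frgr_mem() hunk above: the 20 bits of frmask describe the preserved floating-point registers, which on ia64 are f2-f5 plus f16-f31; the added base selection keeps bits 4-19 from running past f5 and maps them onto the f16 group instead. A small sketch of the resulting bit-to-register mapping (illustration only):

#include <stdio.h>

/* Map bit i of the 20-bit preserved-FP mask to a floating-point register
 * number: bits 0-3 -> f2..f5, bits 4-19 -> f16..f31, matching the
 * (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4 base chosen above. */
static int frmask_bit_to_fr(int i)
{
	return (i < 4) ? 2 + i : 16 + (i - 4);
}

int main(void)
{
	int i;

	for (i = 0; i < 20; ++i)
		printf("frmask bit %2d -> f%d\n", i, frmask_bit_to_fr(i));
	return 0;
}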
@@ -237,15 +237,17 @@ GLOBAL_ENTRY(__copy_user)
.copy_user_bit##rshift: \
1: \
EX(.failure_out,(EPI) st8 [dst1]=tmp,8); \
- (EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift; \
- EX(3f,(p16) ld8 val1[0]=[src1],8); \
+ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \
+ EX(3f,(p16) ld8 val1[1]=[src1],8); \
+ (p16) mov val1[0]=r0; \
br.ctop.dptk 1b; \
;; \
br.cond.sptk.many .diff_align_do_tail; \
2: \
(EPI) st8 [dst1]=tmp,8; \
- (EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift; \
+ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \
3: \
+ (p16) mov val1[1]=r0; \
(p16) mov val1[0]=r0; \
br.ctop.dptk 2b; \
;; \
...
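Aside on the __copy_user hunk above: the fix changes which stages of the rotating val1[] pipeline feed the shrp merge and primes the extra stage with r0. The register-rotation details are ia64-specific, but for reference, here is a C sketch of the word merge such a misaligned copy performs, assuming a little-endian layout and 0 < rshift < 64 (illustration only):

#include <stdint.h>
#include <stdio.h>

/* Build one aligned destination word from two consecutive source words
 * when the source is offset from the destination by rshift bits. */
static uint64_t merge_words(uint64_t lo, uint64_t hi, unsigned rshift)
{
	return (lo >> rshift) | (hi << (64 - rshift));
}

int main(void)
{
	uint64_t a = 0x1122334455667788ULL;
	uint64_t b = 0x99aabbccddeeff00ULL;

	printf("%016llx\n", (unsigned long long)merge_words(a, b, 8));
	return 0;
}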
@@ -87,6 +87,12 @@ ia64_outl (unsigned int val, unsigned long port)
__ia64_outl(val, port);
}
+ void
+ ia64_mmiob (void)
+ {
+ __ia64_mmiob();
+ }
/* define aliases: */
asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
@@ -99,4 +105,7 @@ asm ("__ia64_outb = ia64_outb");
asm ("__ia64_outw = ia64_outw");
asm ("__ia64_outl = ia64_outl");
+ asm (".global __ia64_mmiob");
+ asm ("__ia64_mmiob = ia64_mmiob");
#endif /* CONFIG_IA64_GENERIC */
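Aside on the hunk above: for CONFIG_IA64_GENERIC the new out-of-line ia64_mmiob() wrapper is exported under a second name, __ia64_mmiob, using the same asm-alias trick already applied to the inb/outb wrappers. The trick itself is ordinary GCC/GNU-as symbol aliasing on ELF targets; a minimal standalone sketch with made-up names:

#include <stdio.h>

void real_op(void)		/* stand-in for the ia64_mmiob() wrapper */
{
	puts("real_op called");
}

/* Export a second global symbol that resolves to the same code, the way
 * __ia64_mmiob is aliased to ia64_mmiob in the hunk above. */
asm (".global alias_op");
asm ("alias_op = real_op");

extern void alias_op(void);	/* let C code call through the alias */

int main(void)
{
	alias_op();
	return 0;
}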