Commit cdb8355a authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] minor reformatting to vmlinux.lds.S
  [IA64] CMC/CPE: Reverse the order of fetching log and checking poll threshold
  [IA64] PAL calls need physical mode, stacked
  [IA64] ar.fpsr not set on MCA/INIT kernel entry
  [IA64] printing support for MCA/INIT
  [IA64] trim output of show_mem()
  [IA64] show_mem() printk levels
  [IA64] Make gp value point to Region 5 in mca handler
  Revert "[IA64] Unwire set/get_robust_list"
  [IA64] Implement futex primitives
  [IA64-SGI] Do not request DMA memory for BTE
  [IA64] Move perfmon tables from thread_struct to pfm_context
  [IA64] Add interface so modules can discover whether multithreading is on.
  [IA64] kprobes: fixup the pagefault exception caused by probehandlers
  [IA64] kprobe opcode 16 bytes alignment on IA64
  [IA64] esi-support
  [IA64] Add "model name" to /proc/cpuinfo
Parents: b98adfcc df8f0ec1
@@ -423,6 +423,14 @@ config IA64_PALINFO
config SGI_SN
def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
config IA64_ESI
bool "ESI (Extensible SAL Interface) support"
help
If you say Y here, support is built into the kernel to
make ESI calls. ESI calls are used to support vendor-specific
firmware extensions, such as the ability to inject memory-errors
for test-purposes. If you're unsure, say N.
source "drivers/sn/Kconfig"
source "drivers/firmware/Kconfig"
......
@@ -32,6 +32,11 @@ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
obj-$(CONFIG_AUDIT) += audit.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
obj-$(CONFIG_IA64_ESI) += esi.o
ifneq ($(CONFIG_IA64_ESI),)
obj-y += esi_stub.o # must be in kernel proper
endif
# The gate DSO image is built using a special linker script.
targets += gate.so gate-syms.o
......
@@ -1605,8 +1605,8 @@ sys_call_table:
data8 sys_ni_syscall // 1295 reserved for ppoll
data8 sys_unshare
data8 sys_splice
data8 sys_ni_syscall // reserved for set_robust_list
data8 sys_ni_syscall // reserved for get_robust_list
data8 sys_set_robust_list
data8 sys_get_robust_list
data8 sys_sync_file_range // 1300
data8 sys_tee
data8 sys_vmsplice
......
/*
* Extensible SAL Interface (ESI) support routines.
*
* Copyright (C) 2006 Hewlett-Packard Co
* Alex Williamson <alex.williamson@hp.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/esi.h>
#include <asm/sal.h>
MODULE_AUTHOR("Alex Williamson <alex.williamson@hp.com>");
MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support");
MODULE_LICENSE("GPL");
#define MODULE_NAME "esi"
#define ESI_TABLE_GUID \
EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \
0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)
enum esi_systab_entry_type {
ESI_DESC_ENTRY_POINT = 0
};
/*
* Entry type: Size:
* 0 48
*/
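/*
 * ESI_DESC_SIZE() below indexes the string literal "\060" by entry
 * type: '\060' is octal for 48, matching the table above.
 */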
#define ESI_DESC_SIZE(type) "\060"[(unsigned) (type)]
typedef struct ia64_esi_desc_entry_point {
u8 type;
u8 reserved1[15];
u64 esi_proc;
u64 gp;
efi_guid_t guid;
} ia64_esi_desc_entry_point_t;
struct pdesc {
void *addr;
void *gp;
};
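/*
 * On ia64, a function pointer is really the address of a descriptor
 * holding the entry point and gp, so a struct pdesc built on the
 * stack can be cast to ia64_sal_handler and called directly.
 */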
static struct ia64_sal_systab *esi_systab;
static int __init esi_init (void)
{
efi_config_table_t *config_tables;
struct ia64_sal_systab *systab;
unsigned long esi = 0;
char *p;
int i;
config_tables = __va(efi.systab->tables);
for (i = 0; i < (int) efi.systab->nr_tables; ++i) {
if (efi_guidcmp(config_tables[i].guid, ESI_TABLE_GUID) == 0) {
esi = config_tables[i].table;
break;
}
}
if (!esi)
return -ENODEV;
systab = __va(esi);
if (strncmp(systab->signature, "ESIT", 4) != 0) {
printk(KERN_ERR "bad signature in ESI system table!\n");
return -ENODEV;
}
p = (char *) (systab + 1);
for (i = 0; i < systab->entry_count; i++) {
/*
* The first byte of each entry type contains the type
* descriptor.
*/
switch (*p) {
case ESI_DESC_ENTRY_POINT:
break;
default:
printk(KERN_WARNING "Unkown table type %d found in "
"ESI table, ignoring rest of table\n", *p);
return -ENODEV;
}
p += ESI_DESC_SIZE(*p);
}
esi_systab = systab;
return 0;
}
int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp,
enum esi_proc_type proc_type, u64 func,
u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
u64 arg7)
{
struct ia64_fpreg fr[6];
unsigned long flags = 0;
int i;
char *p;
if (!esi_systab)
return -1;
p = (char *) (esi_systab + 1);
for (i = 0; i < esi_systab->entry_count; i++) {
if (*p == ESI_DESC_ENTRY_POINT) {
ia64_esi_desc_entry_point_t *esi = (void *)p;
if (!efi_guidcmp(guid, esi->guid)) {
ia64_sal_handler esi_proc;
struct pdesc pdesc;
pdesc.addr = __va(esi->esi_proc);
pdesc.gp = __va(esi->gp);
esi_proc = (ia64_sal_handler) &pdesc;
ia64_save_scratch_fpregs(fr);
if (proc_type == ESI_PROC_SERIALIZED)
spin_lock_irqsave(&sal_lock, flags);
else if (proc_type == ESI_PROC_MP_SAFE)
local_irq_save(flags);
else
preempt_disable();
*isrvp = (*esi_proc)(func, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
if (proc_type == ESI_PROC_SERIALIZED)
spin_unlock_irqrestore(&sal_lock,
flags);
else if (proc_type == ESI_PROC_MP_SAFE)
local_irq_restore(flags);
else
preempt_enable();
ia64_load_scratch_fpregs(fr);
return 0;
}
}
p += ESI_DESC_SIZE(*p);
}
return -1;
}
EXPORT_SYMBOL_GPL(ia64_esi_call);
int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp,
u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
u64 arg5, u64 arg6, u64 arg7)
{
struct ia64_fpreg fr[6];
unsigned long flags;
u64 esi_params[8];
char *p;
int i;
if (!esi_systab)
return -1;
p = (char *) (esi_systab + 1);
for (i = 0; i < esi_systab->entry_count; i++) {
if (*p == ESI_DESC_ENTRY_POINT) {
ia64_esi_desc_entry_point_t *esi = (void *)p;
if (!efi_guidcmp(guid, esi->guid)) {
ia64_sal_handler esi_proc;
struct pdesc pdesc;
pdesc.addr = (void *)esi->esi_proc;
pdesc.gp = (void *)esi->gp;
esi_proc = (ia64_sal_handler) &pdesc;
esi_params[0] = func;
esi_params[1] = arg1;
esi_params[2] = arg2;
esi_params[3] = arg3;
esi_params[4] = arg4;
esi_params[5] = arg5;
esi_params[6] = arg6;
esi_params[7] = arg7;
ia64_save_scratch_fpregs(fr);
spin_lock_irqsave(&sal_lock, flags);
*isrvp = esi_call_phys(esi_proc, esi_params);
spin_unlock_irqrestore(&sal_lock, flags);
ia64_load_scratch_fpregs(fr);
return 0;
}
}
p += ESI_DESC_SIZE(*p);
}
return -1;
}
EXPORT_SYMBOL_GPL(ia64_esi_call_phys);
static void __exit esi_exit (void)
{
}
module_init(esi_init);
module_exit(esi_exit); /* makes module removable... */
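For context, here is a minimal sketch of how a vendor module might consume this interface. The GUID, function choice, and module are hypothetical and not part of this commit:

/* Hypothetical usage sketch -- not part of this commit. */
#include <linux/module.h>
#include <asm/esi.h>

/* Made-up vendor GUID, for illustration only. */
#define VENDOR_ESI_GUID \
	EFI_GUID(0x12345678, 0xabcd, 0xef01, 0x00, \
		 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77)

static int __init vendor_esi_init(void)
{
	struct ia64_sal_retval isrv;

	/* Query the vendor's ESI entry point; trailing args are unused. */
	if (ia64_esi_call(VENDOR_ESI_GUID, &isrv, ESI_PROC_MP_SAFE,
			  ESI_QUERY, 0, 0, 0, 0, 0, 0, 0))
		return -ENODEV;	/* no matching ESI entry point */
	return isrv.status ? -EIO : 0;
}
module_init(vendor_esi_init);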
/*
* ESI call stub.
*
* Copyright (C) 2005 Hewlett-Packard Co
* Alex Williamson <alex.williamson@hp.com>
*
* Based on EFI call stub by David Mosberger. The stub is virtually
* identical to the one for EFI phys-mode calls, except that ESI
* calls may have up to 8 arguments, so they get passed to this routine
* through memory.
*
* This stub allows us to make ESI calls in physical mode with interrupts
* turned off. ESI calls may not support calling from virtual mode.
*
* Google for "Extensible SAL specification" for a document describing the
* ESI standard.
*/
/*
* PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
* Abstraction Layer Specification", revision 2.6e). Note that
* psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
* Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call
* (the br.ia instruction fails unless psr.dfl and psr.dfh are
* cleared). Fortunately, SAL promises not to touch the floating
* point regs, so at least we don't have to save f2-f127.
*/
#define PSR_BITS_TO_CLEAR \
(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
IA64_PSR_DFL | IA64_PSR_DFH)
#define PSR_BITS_TO_SET \
(IA64_PSR_BN)
#include <asm/processor.h>
#include <asm/asmmacro.h>
/*
* Inputs:
* in0 = address of function descriptor of ESI routine to call
* in1 = address of array of ESI parameters
*
* Outputs:
* r8 = result returned by called function
*/
GLOBAL_ENTRY(esi_call_phys)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
alloc loc1=ar.pfs,2,7,8,0
ld8 r2=[in0],8 // load ESI function's entry point
mov loc0=rp
.body
;;
ld8 out0=[in1],8 // ESI params loaded from array
;; // passing all as inputs doesn't work
ld8 out1=[in1],8
;;
ld8 out2=[in1],8
;;
ld8 out3=[in1],8
;;
ld8 out4=[in1],8
;;
ld8 out5=[in1],8
;;
ld8 out6=[in1],8
;;
ld8 out7=[in1]
mov loc2=gp // save global pointer
mov loc4=ar.rsc // save RSE configuration
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
;;
ld8 gp=[in0] // load ESI function's global pointer
movl r16=PSR_BITS_TO_CLEAR
mov loc3=psr // save processor status word
movl r17=PSR_BITS_TO_SET
;;
or loc3=loc3,r17
mov b6=r2
;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
br.call.sptk.many rp=ia64_switch_mode_phys
.ret0: mov loc5=r19 // old ar.bsp
mov loc6=r20 // old sp
br.call.sptk.many rp=b6 // call the ESI function
.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // save virtual mode psr
mov r19=loc5 // save virtual mode bspstore
mov r20=loc6 // save virtual mode sp
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2: mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc1
mov rp=loc0
mov gp=loc2
br.ret.sptk.many rp
END(esi_call_phys)
@@ -105,5 +105,9 @@ EXPORT_SYMBOL(ia64_spinlock_contention);
# endif
#endif
#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
extern void esi_call_phys (void);
EXPORT_SYMBOL_GPL(esi_call_phys);
#endif
extern char ia64_ivt[];
EXPORT_SYMBOL(ia64_ivt);
@@ -136,10 +136,8 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
static int __kprobes unsupported_inst(uint template, uint slot,
uint major_opcode,
unsigned long kprobe_inst,
struct kprobe *p)
unsigned long addr)
{
unsigned long addr = (unsigned long)p->addr;
if (bundle_encoding[template][slot] == I) {
switch (major_opcode) {
case 0x0: //I_UNIT_MISC_OPCODE:
@@ -217,7 +215,7 @@ static void __kprobes prepare_break_inst(uint template, uint slot,
struct kprobe *p)
{
unsigned long break_inst = BREAK_INST;
bundle_t *bundle = &p->ainsn.insn.bundle;
bundle_t *bundle = &p->opcode.bundle;
/*
* Copy the original kprobe_inst qualifying predicate(qp)
@@ -423,11 +421,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
unsigned long kprobe_inst=0;
unsigned int slot = addr & 0xf, template, major_opcode = 0;
bundle_t *bundle = &p->ainsn.insn.bundle;
memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
bundle_t *bundle;
bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
template = bundle->quad0.template;
if(valid_kprobe_addr(template, slot, addr))
@@ -440,20 +436,19 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
/* Get kprobe_inst and major_opcode from the bundle */
get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr))
return -EINVAL;
prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
return 0;
}
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
return -ENOMEM;
memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
void __kprobes flush_insn_slot(struct kprobe *p)
{
unsigned long arm_addr;
prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -461,9 +456,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
flush_insn_slot(p);
memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
flush_icache_range((unsigned long)p->ainsn.insn,
(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t));
flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
@@ -471,11 +467,18 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
/* p->opcode contains the original unaltered bundle */
memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
memcpy((char *) arm_addr, (char *) p->ainsn.insn,
sizeof(kprobe_opcode_t));
flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
mutex_unlock(&kprobe_mutex);
}
/*
* We are resuming execution after a single step fault, so the pt_regs
* structure reflects the register state after we executed the instruction
@@ -486,12 +489,12 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
*/
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
unsigned long template;
int slot = ((unsigned long)p->addr & 0xf);
template = p->opcode.bundle.quad0.template;
template = p->ainsn.insn->bundle.quad0.template;
if (slot == 1 && bundle_encoding[template][1] == L)
slot = 2;
@@ -553,7 +556,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
unsigned long slot = (unsigned long)p->addr & 0xf;
/* single step inline if break instruction */
@@ -768,6 +771,12 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (ia64_done_with_exception(regs))
return 1;
/*
* Let ia64_do_page_fault() fix it.
......
@@ -1025,18 +1025,13 @@ ia64_old_stack:
ia64_set_kernel_registers:
add temp3=MCA_SP_OFFSET, r3
add temp4=MCA_SOS_OFFSET+SOS(OS_GP), r3
mov b0=r2 // save return address
GET_IA64_MCA_DATA(temp1)
;;
add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp
add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
add r13=temp1, r3 // set current to start of MCA/INIT stack
add r20=temp1, r3 // physical start of MCA/INIT stack
;;
ld8 r1=[temp4] // OS GP from SAL OS state
;;
DATA_PA_TO_VA(r1,temp1)
DATA_PA_TO_VA(r12,temp2)
DATA_PA_TO_VA(r13,temp3)
;;
@@ -1067,6 +1062,10 @@ ia64_set_kernel_registers:
mov cr.itir=r18
mov cr.ifa=r13
mov r20=IA64_TR_CURRENT_STACK
movl r17=FPSR_DEFAULT
;;
mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
;;
itr.d dtr[r20]=r21
;;
......
@@ -79,14 +79,30 @@ static int
fatal_mca(const char *fmt, ...)
{
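/*
 * printk() is not safe from MCA context, so format into a local
 * buffer and hand it to the MCA/INIT-safe log buffer instead.
 */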
va_list args;
char buf[256];
va_start(args, fmt);
vprintk(fmt, args);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
ia64_mca_printk(KERN_ALERT "MCA: %s\n", buf);
return MCA_NOT_RECOVERED;
}
static int
mca_recovered(const char *fmt, ...)
{
va_list args;
char buf[256];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
ia64_mca_printk(KERN_INFO "MCA: %s\n", buf);
return MCA_RECOVERED;
}
/**
* mca_page_isolate - isolate a poisoned page in order not to use it later
* @paddr: poisoned memory location
@@ -140,6 +156,7 @@ mca_page_isolate(unsigned long paddr)
void
mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
{
ia64_mlogbuf_dump();
printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
"iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
raw_smp_processor_id(), current->pid, current->uid,
@@ -440,7 +457,7 @@ recover_from_read_error(slidx_table_t *slidx,
/* Is target address valid? */
if (!pbci->tv)
return fatal_mca(KERN_ALERT "MCA: target address not valid\n");
return fatal_mca("target address not valid");
/*
* cpu read or memory-mapped io read
@@ -458,7 +475,7 @@ recover_from_read_error(slidx_table_t *slidx,
/* Is minstate valid? */
if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
return fatal_mca(KERN_ALERT "MCA: minstate not valid\n");
return fatal_mca("minstate not valid");
psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
@@ -492,13 +509,14 @@ recover_from_read_error(slidx_table_t *slidx,
psr2->bn = 1;
psr2->i = 0;
return MCA_RECOVERED;
return mca_recovered("user memory corruption. "
"kill affected process - recovered.");
}
}
return fatal_mca(KERN_ALERT "MCA: kernel context not recovered,"
" iip 0x%lx\n", pmsa->pmsa_iip);
return fatal_mca("kernel context not recovered, iip 0x%lx\n",
pmsa->pmsa_iip);
}
/**
@@ -584,13 +602,13 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
* The machine check is corrected.
*/
if (psp->cm == 1)
return MCA_RECOVERED;
return mca_recovered("machine check is already corrected.");
/*
* The error was not contained. Software must be reset.
*/
if (psp->us || psp->ci == 0)
return fatal_mca(KERN_ALERT "MCA: error not contained\n");
return fatal_mca("error not contained");
/*
* The cache check and bus check bits have four possible states
@@ -601,22 +619,22 @@
* 1 1 Memory error, attempt recovery
*/
if (psp->bc == 0 || pbci == NULL)
return fatal_mca(KERN_ALERT "MCA: No bus check\n");
return fatal_mca("No bus check");
/*
* Sorry, we cannot handle so many.
*/
if (peidx_bus_check_num(peidx) > 1)
return fatal_mca(KERN_ALERT "MCA: Too many bus checks\n");
return fatal_mca("Too many bus checks");
/*
* Well, here is only one bus error.
*/
if (pbci->ib)
return fatal_mca(KERN_ALERT "MCA: Internal Bus error\n");
return fatal_mca("Internal Bus error");
if (pbci->cc)
return fatal_mca(KERN_ALERT "MCA: Cache-cache error\n");
return fatal_mca("Cache-cache error");
if (pbci->eb && pbci->bsi > 0)
return fatal_mca(KERN_ALERT "MCA: External bus check fatal status\n");
return fatal_mca("External bus check fatal status");
/*
* This is a local MCA and estimated as a recoverable external bus error.
@@ -628,7 +646,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
/*
* On account of strange SAL error record, we cannot recover.
*/
return fatal_mca(KERN_ALERT "MCA: Strange SAL record\n");
return fatal_mca("Strange SAL record");
}
/**
@@ -657,10 +675,10 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
/* Now, OS can recover when there is one processor error section */
if (n_proc_err > 1)
return fatal_mca(KERN_ALERT "MCA: Too Many Errors\n");
return fatal_mca("Too Many Errors");
else if (n_proc_err == 0)
/* Weird SAL record ... We need not to recover */
return fatal_mca(KERN_ALERT "MCA: Weird SAL record\n");
/* Weird SAL record ... We can't do anything */
return fatal_mca("Weird SAL record");
/* Make index of processor error section */
mca_make_peidx((sal_log_processor_info_t*)
@@ -671,7 +689,7 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
/* Check whether MCA is global or not */
if (is_mca_global(&peidx, &pbci, sos))
return fatal_mca(KERN_ALERT "MCA: global MCA\n");
return fatal_mca("global MCA");
/* Try to recover a processor error */
return recover_from_processor_error(platform_err, &slidx, &peidx,
......
@@ -118,3 +118,7 @@ struct mca_table_entry {
extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
extern int mca_recover_range(unsigned long);
extern void ia64_mca_printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));
extern void ia64_mlogbuf_dump(void);
@@ -266,6 +266,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
/* Check for outstanding MCA/INIT records every minute (arbitrary) */
#define SALINFO_TIMER_DELAY (60*HZ)
static struct timer_list salinfo_timer;
extern void ia64_mlogbuf_dump(void);
static void
salinfo_timeout_check(struct salinfo_data *data)
@@ -283,6 +284,7 @@ salinfo_timeout_check(struct salinfo_data *data)
static void
salinfo_timeout (unsigned long arg)
{
ia64_mlogbuf_dump();
salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT);
salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
@@ -332,6 +334,8 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
if (cpu == -1)
goto retry;
ia64_mlogbuf_dump();
/* for next read, start checking at next CPU */
data->cpu_check = cpu;
if (++data->cpu_check == NR_CPUS)
......
@@ -509,7 +509,7 @@ show_cpuinfo (struct seq_file *m, void *v)
{ 1UL << 1, "spontaneous deferral"},
{ 1UL << 2, "16-byte atomic ops" }
};
char family[32], features[128], *cp, sep;
char features[128], *cp, sep;
struct cpuinfo_ia64 *c = v;
unsigned long mask;
unsigned long proc_freq;
@@ -517,12 +517,6 @@ show_cpuinfo (struct seq_file *m, void *v)
mask = c->features;
switch (c->family) {
case 0x07: memcpy(family, "Itanium", 8); break;
case 0x1f: memcpy(family, "Itanium 2", 10); break;
default: sprintf(family, "%u", c->family); break;
}
/* build the feature string: */
memcpy(features, " standard", 10);
cp = features;
@@ -553,8 +547,9 @@
"processor : %d\n"
"vendor : %s\n"
"arch : IA-64\n"
"family : %s\n"
"family : %u\n"
"model : %u\n"
"model name : %s\n"
"revision : %u\n"
"archrev : %u\n"
"features :%s\n" /* don't change this---it _is_ right! */
@@ -563,7 +558,8 @@
"cpu MHz : %lu.%06lu\n"
"itc MHz : %lu.%06lu\n"
"BogoMIPS : %lu.%02lu\n",
cpunum, c->vendor, family, c->model, c->revision, c->archrev,
cpunum, c->vendor, c->family, c->model,
c->model_name, c->revision, c->archrev,
features, c->ppn, c->number,
proc_freq / 1000, proc_freq % 1000,
c->itc_freq / 1000000, c->itc_freq % 1000000,
@@ -611,6 +607,31 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo
};
static char brandname[128];
static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
char brand[128];
if (ia64_pal_get_brand_info(brand)) {
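/* PAL_BRAND_INFO failed; fall back to a name based on family/model. */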
if (family == 0x7)
memcpy(brand, "Merced", 7);
else if (family == 0x1f) switch (model) {
case 0: memcpy(brand, "McKinley", 9); break;
case 1: memcpy(brand, "Madison", 8); break;
case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
} else
memcpy(brand, "Unknown", 8);
}
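/*
 * Cache the first brand string seen; CPUs reporting a different
 * brand get a kstrdup()'d copy.
 */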
if (brandname[0] == '\0')
return strcpy(brandname, brand);
else if (strcmp(brandname, brand) == 0)
return brandname;
else
return kstrdup(brand, GFP_KERNEL);
}
static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
@@ -640,7 +661,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
pal_status_t status;
unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
int i;
for (i = 0; i < 5; ++i)
cpuid.bits[i] = ia64_get_cpuid(i);
@@ -663,6 +683,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->family = cpuid.field.family;
c->archrev = cpuid.field.archrev;
c->features = cpuid.field.features;
c->model_name = get_model_name(c->family, c->model);
status = ia64_pal_vm_summary(&vm1, &vm2);
if (status == PAL_STATUS_SUCCESS) {
......
@@ -879,3 +879,27 @@ identify_siblings(struct cpuinfo_ia64 *c)
c->core_id = info.log1_cid;
c->thread_id = info.log1_tid;
}
/*
* Returns non-zero if multi-threading is enabled on at least one
* physical package. Due to CPU hotplug and the maxcpus= boot option,
* not all threads are necessarily enabled even when the processor
* supports multi-threading.
*/
int is_multithreading_enabled(void)
{
int i, j;
for_each_present_cpu(i) {
for_each_present_cpu(j) {
if (j == i)
continue;
if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
if (cpu_data(j)->core_id == cpu_data(i)->core_id)
return 1;
}
}
}
return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);
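As a rough illustration (the caller below is hypothetical, not from this commit), code elsewhere in the kernel can now key SMT-specific decisions off this interface:

/* Hypothetical caller, for illustration only. */
#include <linux/kernel.h>
#include <asm/smp.h>

static void report_smt(void)
{
	if (is_multithreading_enabled())
		printk(KERN_INFO "SMT is enabled on at least one package\n");
}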
@@ -184,7 +184,9 @@ SECTIONS
*(.data.gate)
__stop_gate_section = .;
}
. = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */
. = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
* kernel data
*/
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
{ *(.data.read_mostly) }
@@ -202,7 +204,9 @@
*(.data.percpu)
__per_cpu_end = .;
}
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
* into percpu page size
*/
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
......
@@ -40,10 +40,11 @@ show_mem (void)
int i, total = 0, reserved = 0;
int shared = 0, cached = 0;
printk("Mem-info:\n");
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
for (i = 0; i < max_mapnr; i++) {
if (!pfn_valid(i)) {
@@ -62,12 +63,12 @@ show_mem (void)
else if (page_count(mem_map + i))
shared += page_count(mem_map + i) - 1;
}
printk("%d pages of RAM\n", total);
printk("%d reserved pages\n", reserved);
printk("%d pages shared\n", shared);
printk("%d pages swap cached\n", cached);
printk("%ld pages in page table cache\n",
pgtable_quicklist_total_size());
printk(KERN_INFO "%d pages of RAM\n", total);
printk(KERN_INFO "%d reserved pages\n", reserved);
printk(KERN_INFO "%d pages shared\n", shared);
printk(KERN_INFO "%d pages swap cached\n", cached);
printk(KERN_INFO "%ld pages in page table cache\n",
pgtable_quicklist_total_size());
}
/* physical address where the bootmem map is located */
......
@@ -547,15 +547,16 @@ void show_mem(void)
unsigned long total_present = 0;
pg_data_t *pgdat;
printk("Mem-info:\n");
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
printk(KERN_INFO "Node memory in pages:\n");
for_each_online_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
int shared = 0, cached = 0, reserved = 0;
printk("Node ID: %d\n", pgdat->node_id);
pgdat_resize_lock(pgdat, &flags);
present = pgdat->node_present_pages;
for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -579,18 +580,17 @@ void show_mem(void)
total_reserved += reserved;
total_cached += cached;
total_shared += shared;
printk("\t%ld pages of RAM\n", present);
printk("\t%d reserved pages\n", reserved);
printk("\t%d pages shared\n", shared);
printk("\t%d pages swap cached\n", cached);
printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
"shrd: %10d, swpd: %10d\n", pgdat->node_id,
present, reserved, shared, cached);
}
printk("%ld pages of RAM\n", total_present);
printk("%d reserved pages\n", total_reserved);
printk("%d pages shared\n", total_shared);
printk("%d pages swap cached\n", total_cached);
printk("Total of %ld pages in page table cache\n",
pgtable_quicklist_total_size());
printk("%d free buffer pages\n", nr_free_buffer_pages());
printk(KERN_INFO "%ld pages of RAM\n", total_present);
printk(KERN_INFO "%d reserved pages\n", total_reserved);
printk(KERN_INFO "%d pages shared\n", total_shared);
printk(KERN_INFO "%d pages swap cached\n", total_cached);
printk(KERN_INFO "Total of %ld pages in page table cache\n",
pgtable_quicklist_total_size());
printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}
/**
......
@@ -277,8 +277,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
}
/* temporary buffer used during unaligned transfers */
bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
GFP_KERNEL | GFP_DMA);
bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, GFP_KERNEL);
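/* GFP_DMA is deliberately not requested: the BTE can address all physical memory. */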
if (bteBlock_unaligned == NULL) {
return BTEFAIL_NOTAVAIL;
}
......
/*
* ESI service calls.
*
* Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
* Alex Williamson <alex.williamson@hp.com>
*/
#ifndef esi_h
#define esi_h
#include <linux/efi.h>
#define ESI_QUERY 0x00000001
#define ESI_OPEN_HANDLE 0x02000000
#define ESI_CLOSE_HANDLE 0x02000001
enum esi_proc_type {
ESI_PROC_SERIALIZED, /* calls need to be serialized */
ESI_PROC_MP_SAFE, /* MP-safe, but not reentrant */
ESI_PROC_REENTRANT /* MP-safe and reentrant */
};
extern int ia64_esi_init (void);
extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
enum esi_proc_type,
u64, u64, u64, u64, u64, u64, u64, u64);
extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64,
u64, u64, u64, u64, u64, u64);
#endif /* esi_h */
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#include <linux/futex.h>
#include <asm/errno.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#endif
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
do { \
register unsigned long r8 __asm ("r8") = 0; \
__asm__ __volatile__( \
" mf;; \n" \
"[1:] " insn ";; \n" \
" .xdata4 \"__ex_table\", 1b-., 2f-. \n" \
"[2:]" \
: "+r" (r8), "=r" (oldval) \
: "r" (uaddr), "r" (oparg) \
: "memory"); \
ret = r8; \
} while (0)
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
do { \
register unsigned long r8 __asm ("r8") = 0; \
int val, newval; \
do { \
__asm__ __volatile__( \
" mf;; \n" \
"[1:] ld4 %3=[%4];; \n" \
" mov %2=%3 \n" \
insn ";; \n" \
" mov ar.ccv=%2;; \n" \
"[2:] cmpxchg4.acq %1=[%4],%3,ar.ccv;; \n" \
" .xdata4 \"__ex_table\", 1b-., 3f-.\n" \
" .xdata4 \"__ex_table\", 2b-., 3f-.\n" \
"[3:]" \
: "+r" (r8), "=r" (val), "=&r" (oldval), \
"=&r" (newval) \
: "r" (uaddr), "r" (oparg) \
: "memory"); \
if (unlikely (r8)) \
break; \
} while (unlikely (val != oldval)); \
ret = r8; \
} while (0)
static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
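/*
 * encoded_op layout (cf. FUTEX_OP() in <linux/futex.h>): bits 31..28
 * hold the op (bit 31 doubles as the oparg-shift flag), bits 27..24
 * the comparison, bits 23..12 oparg and bits 11..0 cmparg; the two
 * 12-bit fields are sign-extended below.
 */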
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
inc_preempt_count();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op1("xchg4 %1=[%2],%3", ret, oldval, uaddr,
oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op2("add %3=%3,%5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op2("or %3=%3,%5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op2("and %3=%3,%5", ret, oldval, uaddr,
~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op2("xor %3=%3,%5", ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
dec_preempt_count();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
{
register unsigned long r8 __asm ("r8");
__asm__ __volatile__(
" mf;; \n"
" mov ar.ccv=%3;; \n"
"[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
: "=r" (r8)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
return r8;
}
}
#endif /* _ASM_FUTEX_H */
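For concreteness, a sketch of how the generic futex layer exercises these primitives through the standard FUTEX_OP() packing (operands chosen purely for illustration):

/* Illustrative only: build an encoded_op for the helper above. */
#include <linux/futex.h>

static const int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
/*
 * futex_atomic_op_inuser(encoded_op, uaddr) then atomically adds 1 to
 * *uaddr and returns whether the old value was greater than zero.
 */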
@@ -29,7 +29,8 @@
#include <linux/percpu.h>
#include <asm/break.h>
#define MAX_INSN_SIZE 16
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 1
#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6)
typedef union cmp_inst {
@@ -94,7 +95,7 @@ struct kprobe_ctlblk {
#define IP_RELATIVE_PREDICT_OPCODE (7)
#define LONG_BRANCH_OPCODE (0xC)
#define LONG_CALL_OPCODE (0xD)
#define arch_remove_kprobe(p) do {} while (0)
#define flush_insn_slot(p) do { } while (0)
typedef struct kprobe_opcode {
bundle_t bundle;
@@ -108,7 +109,7 @@ struct fnptr {
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
/* copy of the instruction to be emulated */
kprobe_opcode_t insn;
kprobe_opcode_t *insn;
#define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
#define INST_FLAG_FIX_BRANCH_REG 2
#define INST_FLAG_BREAK_INST 4
@@ -125,6 +126,6 @@ static inline void jprobe_return(void)
}
extern void invalidate_stacked_regs(void);
extern void flush_register_stack(void);
extern void flush_insn_slot(struct kprobe *p);
extern void arch_remove_kprobe(struct kprobe *p);
#endif /* _ASM_KPROBES_H */
@@ -197,9 +197,9 @@
movl temp2 = start_addr; \
;; \
mov cr.iip = temp2; \
movl gp = __gp \
;; \
DATA_PA_TO_VA(sp, temp1); \
DATA_PA_TO_VA(gp, temp2); \
srlz.i; \
;; \
nop 1; \
......
@@ -78,6 +78,7 @@
#define PAL_VM_TR_READ 261 /* read contents of translation register */
#define PAL_GET_PSTATE 262 /* get the current P-state */
#define PAL_SET_PSTATE 263 /* set the P-state */
#define PAL_BRAND_INFO 274 /* Processor branding information */
#ifndef __ASSEMBLY__
@@ -963,7 +964,8 @@ static inline s64
ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
{
struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data,
physical_addr, 0);
return iprv.status;
}
@@ -985,7 +987,8 @@ static inline s64
ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
{
struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data,
physical_addr, data);
return iprv.status;
}
@@ -1133,6 +1136,15 @@ ia64_pal_set_pstate (u64 pstate_index)
return iprv.status;
}
/* Processor branding information */
static inline s64
ia64_pal_get_brand_info (char *brand_info)
{
struct ia64_pal_retval iprv;
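/* brand_info should point to a 128-byte buffer (cf. get_model_name() in setup.c). */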
PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0);
return iprv.status;
}
/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
* suspended, but cache and TLB coherency is maintained.
*/
......
@@ -20,12 +20,6 @@
#include <asm/ustack.h>
#define IA64_NUM_DBG_REGS 8
/*
* Limits for PMC and PMD are set to less than maximum architected values
* but should be sufficient for a while
*/
#define IA64_NUM_PMC_REGS 64
#define IA64_NUM_PMD_REGS 64
#define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000)
@@ -163,6 +157,7 @@ struct cpuinfo_ia64 {
__u8 family;
__u8 archrev;
char vendor[16];
char *model_name;
#ifdef CONFIG_NUMA
struct ia64_node_data *node_data;
@@ -262,13 +257,9 @@ struct thread_struct {
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
__u64 pmcs[IA64_NUM_PMC_REGS];
__u64 pmds[IA64_NUM_PMD_REGS];
void *pfm_context; /* pointer to detailed PMU context */
unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM .pmcs = {0UL, }, \
.pmds = {0UL, }, \
.pfm_context = NULL, \
# define INIT_THREAD_PM .pfm_context = NULL, \
.pfm_needs_checking = 0UL,
#else
# define INIT_THREAD_PM
......
@@ -126,6 +126,7 @@ extern void smp_send_reschedule (int cpu);
extern void lock_ipi_calllock(void);
extern void unlock_ipi_calllock(void);
extern void identify_siblings (struct cpuinfo_ia64 *);
extern int is_multithreading_enabled(void);
#else
......
@@ -286,7 +286,8 @@
/* 1294, 1295 reserved for pselect/ppoll */
#define __NR_unshare 1296
#define __NR_splice 1297
/* 1298, 1299 reserved for set_robust_list/get_robust_list */
#define __NR_set_robust_list 1298
#define __NR_get_robust_list 1299
#define __NR_sync_file_range 1300
#define __NR_tee 1301
#define __NR_vmsplice 1302
......