Commit b22793f7 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Cannot use register_percpu_irq() from ia64_mca_init()
  [IA64] Initialize interrupts later (from init_IRQ())
  [IA64] enable ARCH_DMA_ADDR_T_64BIT
  [IA64] ioc3_serial: release resources in error return path
  [IA64] Stop using the deprecated __do_IRQ() code path
  [IA64] Remove unnecessary casts of private_data in perfmon.c
  [IA64] Fix missing iounmap in error path in cyclone.c
  [IA64] salinfo: sema_init instead of init_MUTEX
  [IA64] xen: use ARRAY_SIZE macro in xen_pv_ops.c
  [IA64] Use static const char * const in palinfo.c
  [IA64] remove asm/compat.h
  [IA64] Add CONFIG_STACKTRACE_SUPPORT
  [IA64] Move local_softirq_pending() definition
  [IA64] iommu: Add a dummy iommu_table.h file in IA64.
  [IA64] unwind - optimise linked-list searches for modules
  [IA64] unwind: remove preprocesser noise, and correct comment
parents b6537889 c0f37d2a
@@ -53,6 +53,9 @@ config MMU
         bool
         default y
 
+config ARCH_DMA_ADDR_T_64BIT
+        def_bool y
+
 config NEED_DMA_MAP_STATE
         def_bool y
@@ -62,6 +65,9 @@ config NEED_SG_DMA_LENGTH
 config SWIOTLB
         bool
 
+config STACKTRACE_SUPPORT
+        def_bool y
+
 config GENERIC_LOCKBREAK
         def_bool n
@@ -683,8 +689,10 @@ source "lib/Kconfig"
 # Use the generic interrupt handling code in kernel/irq/:
 #
 config GENERIC_HARDIRQS
-        bool
-        default y
+        def_bool y
+
+config GENERIC_HARDIRQS_NO__DO_IRQ
+        def_bool y
 
 config GENERIC_IRQ_PROBE
         bool
...
#ifndef _ASM_IA64_COMPAT_H
#define _ASM_IA64_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "i686\0\0\0"
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_key_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 __attribute__((aligned(4))) compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 __attribute__((aligned(4))) compat_u64;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_spare[5];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __unused1;
compat_time_t sem_ctime;
compat_ulong_t __unused2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __unused1;
compat_time_t msg_rtime;
compat_ulong_t __unused2;
compat_time_t msg_ctime;
compat_ulong_t __unused3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __unused1;
compat_time_t shm_dtime;
compat_ulong_t __unused2;
compat_time_t shm_ctime;
compat_ulong_t __unused3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
/*
* A pointer passed in from user mode. This should not be used for syscall parameters,
* just declare them as pointers because the syscall entry code will have appropriately
* converted them already.
*/
typedef u32 compat_uptr_t;
static inline void __user *
compat_ptr (compat_uptr_t uptr)
{
return (void __user *) (unsigned long) uptr;
}
static inline compat_uptr_t
ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
static __inline__ void __user *
arch_compat_alloc_user_space (long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
}
#endif /* _ASM_IA64_COMPAT_H */
@@ -34,6 +34,7 @@ obj-$(CONFIG_AUDIT) += audit.o
 obj-$(CONFIG_PCI_MSI)           += msi_ia64.o
 mca_recovery-y                  += mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
+obj-$(CONFIG_STACKTRACE)        += stacktrace.o
 obj-$(CONFIG_PARAVIRT)          += paravirt.o paravirtentry.o \
                                    paravirt_patch.o
...
@@ -59,13 +59,13 @@ int __init init_cyclone_clock(void)
                 return -ENODEV;
         }
         base = readq(reg);
+        iounmap(reg);
         if(!base){
                 printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
                                 " value.\n");
                 use_cyclone = 0;
                 return -ENODEV;
         }
-        iounmap(reg);
 
         /* setup PMCC */
         offset = (base + CYCLONE_PMCC_OFFSET);
...
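The hunk above is the whole of the cyclone.c change: the CBAR mapping is only needed long enough to read the base value, so it is now unmapped before either return path instead of only on success. A condensed sketch of the resulting pattern, with a hypothetical register address and function name rather than the actual init_cyclone_clock() code:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_REG_ADDR        0xff5b0000UL    /* hypothetical MMIO address */

static int __init example_read_base(u64 *out)
{
        void __iomem *reg = ioremap_nocache(EXAMPLE_REG_ADDR, sizeof(u64));
        u64 base;

        if (!reg)
                return -ENODEV;
        base = readq(reg);
        iounmap(reg);           /* unmap on every path, not just on success */
        if (!base)
                return -ENODEV;
        *out = base;
        return 0;
}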
@@ -108,10 +108,6 @@
 #define DBG(fmt...)
 #endif
 
-#define NR_PREALLOCATE_RTE_ENTRIES \
-        (PAGE_SIZE / sizeof(struct iosapic_rte_info))
-#define RTE_PREALLOCATED        (1)
-
 static DEFINE_SPINLOCK(iosapic_lock);
 
 /*
@@ -136,7 +132,6 @@ struct iosapic_rte_info {
         struct list_head rte_list;      /* RTEs sharing the same vector */
         char            rte_index;      /* IOSAPIC RTE index */
         int             refcnt;         /* reference counter */
-        unsigned int    flags;          /* flags */
         struct iosapic  *iosapic;
 } ____cacheline_aligned;
 
@@ -155,9 +150,6 @@ static struct iosapic_intr_info {
 static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */
 
-static int iosapic_kmalloc_ok;
-static LIST_HEAD(free_rte_list);
-
 static inline void
 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
 {
@@ -394,7 +386,7 @@ iosapic_startup_level_irq (unsigned int irq)
 }
 
 static void
-iosapic_end_level_irq (unsigned int irq)
+iosapic_unmask_level_irq (unsigned int irq)
 {
         ia64_vector vec = irq_to_vector(irq);
         struct iosapic_rte_info *rte;
@@ -404,7 +396,8 @@ iosapic_end_level_irq (unsigned int irq)
         if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
                 do_unmask_irq = 1;
                 mask_irq(irq);
-        }
+        } else
+                unmask_irq(irq);
 
         list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
                 iosapic_eoi(rte->iosapic->addr, vec);
@@ -427,9 +420,8 @@ static struct irq_chip irq_type_iosapic_level = {
         .enable =       iosapic_enable_level_irq,
         .disable =      iosapic_disable_level_irq,
         .ack =          iosapic_ack_level_irq,
-        .end =          iosapic_end_level_irq,
         .mask =         mask_irq,
-        .unmask =       unmask_irq,
+        .unmask =       iosapic_unmask_level_irq,
         .set_affinity = iosapic_set_affinity
 };
 
@@ -552,37 +544,6 @@ iosapic_reassign_vector (int irq)
         }
 }
 
-static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
-{
-        int i;
-        struct iosapic_rte_info *rte;
-        int preallocated = 0;
-
-        if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
-                rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
-                                    NR_PREALLOCATE_RTE_ENTRIES);
-                for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
-                        list_add(&rte->rte_list, &free_rte_list);
-        }
-
-        if (!list_empty(&free_rte_list)) {
-                rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
-                                 rte_list);
-                list_del(&rte->rte_list);
-                preallocated++;
-        } else {
-                rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
-                if (!rte)
-                        return NULL;
-        }
-
-        memset(rte, 0, sizeof(struct iosapic_rte_info));
-        if (preallocated)
-                rte->flags |= RTE_PREALLOCATED;
-
-        return rte;
-}
-
 static inline int irq_is_shared (int irq)
 {
         return (iosapic_intr_info[irq].count > 1);
@@ -615,7 +576,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
         rte = find_rte(irq, gsi);
         if (!rte) {
-                rte = iosapic_alloc_rte();
+                rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
                 if (!rte) {
                         printk(KERN_WARNING "%s: cannot allocate memory\n",
                                __func__);
@@ -658,6 +619,10 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
                            idesc->chip->name, irq_type->name);
                 idesc->chip = irq_type;
         }
+        if (trigger == IOSAPIC_EDGE)
+                __set_irq_handler_unlocked(irq, handle_edge_irq);
+        else
+                __set_irq_handler_unlocked(irq, handle_level_irq);
         return 0;
 }
 
@@ -1161,10 +1126,3 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
         return;
 }
 #endif
-
-static int __init iosapic_enable_kmalloc (void)
-{
-        iosapic_kmalloc_ok = 1;
-        return 0;
-}
-core_initcall (iosapic_enable_kmalloc);
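The iosapic.c hunks above carry most of the __do_IRQ() removal: RTEs are now plain kzalloc() allocations, and every interrupt is handed to a generic flow handler (handle_edge_irq or handle_level_irq) instead of going through the old .end-based path. As a minimal sketch of the chip-plus-flow-handler pairing the generic hardirq layer expects, assuming a hypothetical irq_chip and setup helper (not the iosapic code itself):

#include <linux/irq.h>

static struct irq_chip example_chip = {         /* hypothetical chip */
        .name = "example",
};

static void example_setup_irq(unsigned int irq, int is_edge)
{
        /* pair the chip with the flow handler matching the trigger type */
        if (is_edge)
                set_irq_chip_and_handler(irq, &example_chip, handle_edge_irq);
        else
                set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
}
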
@@ -30,6 +30,7 @@
 #include <linux/bitops.h>
 #include <linux/irq.h>
 #include <linux/ratelimit.h>
+#include <linux/acpi.h>
 
 #include <asm/delay.h>
 #include <asm/intrinsics.h>
@@ -635,6 +636,7 @@ ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
         desc->chip = &irq_type_ia64_lsapic;
         if (action)
                 setup_irq(irq, action);
+        set_irq_handler(irq, handle_percpu_irq);
 }
 
 void __init
@@ -650,6 +652,9 @@ ia64_native_register_ipi(void)
 void __init
 init_IRQ (void)
 {
+#ifdef CONFIG_ACPI
+        acpi_boot_init();
+#endif
         ia64_register_ipi();
         register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
...
@@ -2055,25 +2055,6 @@ ia64_mca_init(void)
         IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
 
-        /*
-         *  Configure the CMCI/P vector and handler. Interrupts for CMC are
-         *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
-         */
-        register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
-        register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
-        ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
-
-        /* Setup the MCA rendezvous interrupt vector */
-        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
-
-        /* Setup the MCA wakeup interrupt vector */
-        register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
-
-#ifdef CONFIG_ACPI
-        /* Setup the CPEI/P handler */
-        register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
-#endif
-
         /* Initialize the areas set aside by the OS to buffer the
          * platform/processor error states for MCA/INIT/CMC
          * handling.
@@ -2103,6 +2084,25 @@ ia64_mca_late_init(void)
         if (!mca_init)
                 return 0;
 
+        /*
+         *  Configure the CMCI/P vector and handler. Interrupts for CMC are
+         *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
+         */
+        register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
+        register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
+        ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
+
+        /* Setup the MCA rendezvous interrupt vector */
+        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
+
+        /* Setup the MCA wakeup interrupt vector */
+        register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
+
+#ifdef CONFIG_ACPI
+        /* Setup the CPEI/P handler */
+        register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+#endif
+
         register_hotcpu_notifier(&mca_cpu_notifier);
 
         /* Setup the CMCI/P vector and handler */
...
@@ -434,7 +434,7 @@ register_info(char *page)
         unsigned long phys_stacked;
         pal_hints_u_t hints;
         unsigned long iregs, dregs;
-        char *info_type[]={
+        static const char * const info_type[] = {
                 "Implemented AR(s)",
                 "AR(s) with read side-effects",
                 "Implemented CR(s)",
...
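A short illustration of what the new declaration buys, using a hypothetical table rather than palinfo's info_type[]: the first const protects the strings, the second protects the array of pointers, and static lets the compiler emit the table once in read-only data instead of rebuilding it on the stack on every call.

static const char * const example_names[] = {   /* illustrative only */
        "alpha",
        "beta",
};
/* example_names[0] = "gamma";    would not compile: the pointer array is const */
/* example_names[0][0] = 'A';     would not compile: the strings are const */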
@@ -1573,7 +1573,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
                 return -EINVAL;
         }
 
-        ctx = (pfm_context_t *)filp->private_data;
+        ctx = filp->private_data;
         if (ctx == NULL) {
                 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
                 return -EINVAL;
@@ -1673,7 +1673,7 @@ pfm_poll(struct file *filp, poll_table * wait)
                 return 0;
         }
 
-        ctx = (pfm_context_t *)filp->private_data;
+        ctx = filp->private_data;
         if (ctx == NULL) {
                 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
                 return 0;
@@ -1733,7 +1733,7 @@ pfm_fasync(int fd, struct file *filp, int on)
                 return -EBADF;
         }
 
-        ctx = (pfm_context_t *)filp->private_data;
+        ctx = filp->private_data;
         if (ctx == NULL) {
                 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
                 return -EBADF;
@@ -1841,7 +1841,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
                 return -EBADF;
         }
 
-        ctx = (pfm_context_t *)filp->private_data;
+        ctx = filp->private_data;
         if (ctx == NULL) {
                 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
                 return -EBADF;
@@ -1984,7 +1984,7 @@ pfm_close(struct inode *inode, struct file *filp)
                 return -EBADF;
         }
 
-        ctx = (pfm_context_t *)filp->private_data;
+        ctx = filp->private_data;
         if (ctx == NULL) {
                 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
                 return -EBADF;
@@ -4907,7 +4907,7 @@ sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
                 goto error_args;
         }
 
-        ctx = (pfm_context_t *)file->private_data;
+        ctx = file->private_data;
         if (unlikely(ctx == NULL)) {
                 DPRINT(("no context for fd %d\n", fd));
                 goto error_args;
...
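All six perfmon.c hunks make the same point: private_data is a void *, and C converts void * to any object pointer type implicitly, so the (pfm_context_t *) casts added nothing. A trivial sketch with a hypothetical context type:

struct example_ctx {                    /* stand-in for pfm_context_t */
        int state;
};

static struct example_ctx *example_get_ctx(void *private_data)
{
        return private_data;            /* no cast needed from void * */
}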
@@ -642,7 +642,7 @@ salinfo_init(void)
         for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
                 data = salinfo_data + i;
                 data->type = i;
-                init_MUTEX(&data->mutex);
+                sema_init(&data->mutex, 1);
                 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
                 if (!dir)
                         continue;
...
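The salinfo.c conversion is mechanical: the deprecated init_MUTEX() only ever initialized a semaphore to a count of one, so the open-coded sema_init() call is equivalent. A minimal sketch with a hypothetical semaphore:

#include <linux/semaphore.h>

static struct semaphore example_sem;            /* hypothetical */

static void example_init(void)
{
        /* formerly: init_MUTEX(&example_sem); */
        sema_init(&example_sem, 1);             /* count of 1 == binary semaphore */
}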
@@ -594,10 +594,6 @@ setup_arch (char **cmdline_p)
         cpu_init();     /* initialize the bootstrap CPU */
         mmu_context_init();     /* initialize context_id bitmap */
 
-#ifdef CONFIG_ACPI
-        acpi_boot_init();
-#endif
-
         paravirt_banner();
         paravirt_arch_setup_console(cmdline_p);
...
/*
 * arch/ia64/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>

static void
ia64_do_save_stack(struct unw_frame_info *info, void *arg)
{
        struct stack_trace *trace = arg;
        unsigned long ip;
        int skip = trace->skip;

        trace->nr_entries = 0;
        do {
                unw_get_ip(info, &ip);
                if (ip == 0)
                        break;
                if (skip == 0) {
                        trace->entries[trace->nr_entries++] = ip;
                        if (trace->nr_entries == trace->max_entries)
                                break;
                } else
                        skip--;
        } while (unw_unwind(info) >= 0);
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
        unw_init_running(ia64_do_save_stack, trace);
}
EXPORT_SYMBOL(save_stack_trace);
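save_stack_trace() fills a buffer supplied by the caller through struct stack_trace. A minimal usage sketch, with illustrative names and sizes that are not part of this file:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void example_dump_callers(void)
{
        unsigned long entries[16];              /* hypothetical buffer */
        struct stack_trace trace = {
                .entries     = entries,
                .max_entries = ARRAY_SIZE(entries),
                .skip        = 1,               /* drop this function's own frame */
        };

        save_stack_trace(&trace);
        print_stack_trace(&trace, 0);           /* trace.nr_entries frames captured */
}
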
@@ -1204,10 +1204,10 @@ desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word
 static inline unw_hash_index_t
 hash (unsigned long ip)
 {
-#       define hashmagic        0x9e3779b97f4a7c16UL    /* based on (sqrt(5)/2-1)*2^64 */
+        /* magic number = ((sqrt(5)-1)/2)*2^64 */
+        static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
 
-        return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
-#       undef hashmagic
+        return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
 }
 
 static inline long
@@ -1531,7 +1531,7 @@ build_script (struct unw_frame_info *info)
         struct unw_labeled_state *ls, *next;
         unsigned long ip = info->ip;
         struct unw_state_record sr;
-        struct unw_table *table;
+        struct unw_table *table, *prev;
         struct unw_reg_info *r;
         struct unw_insn insn;
         u8 *dp, *desc_end;
@@ -1560,11 +1560,26 @@ build_script (struct unw_frame_info *info)
         STAT(parse_start = ia64_get_itc());
 
+        prev = NULL;
         for (table = unw.tables; table; table = table->next) {
                 if (ip >= table->start && ip < table->end) {
+                        /*
+                         * Leave the kernel unwind table at the very front,
+                         * lest moving it breaks some assumption elsewhere.
+                         * Otherwise, move the matching table to the second
+                         * position in the list so that traversals can benefit
+                         * from commonality in backtrace paths.
+                         */
+                        if (prev && prev != unw.tables) {
+                                /* unw is safe - we're already spinlocked */
+                                prev->next = table->next;
+                                table->next = unw.tables->next;
+                                unw.tables->next = table;
+                        }
                         e = lookup(table, ip - table->segment_base);
                         break;
                 }
+                prev = table;
         }
         if (!e) {
                 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
...
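The reordering above is a self-organizing-list heuristic: the kernel's own unwind table stays pinned at the head, and the module table that matched last is spliced in right behind it so that repeated backtraces through the same module find it early. A generic sketch of the splice on a singly linked list, with illustrative types (the real code does this to struct unw_table while holding the unwind spinlock):

struct example_table {
        struct example_table *next;
        /* ... lookup data ... */
};

/*
 * Move 'match' to the second position of a list whose head must stay
 * where it is; 'prev' is the node linked immediately before 'match'.
 */
static void promote_to_second(struct example_table *head,
                              struct example_table *prev,
                              struct example_table *match)
{
        if (!prev || prev == head)      /* match is already first or second */
                return;
        prev->next = match->next;       /* unlink match */
        match->next = head->next;       /* relink it right behind the head */
        head->next = match;
}
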
@@ -1136,7 +1136,6 @@ __initconst = {
 static void __init
 xen_patch_branch(unsigned long tag, unsigned long type)
 {
-        const unsigned long nelem =
-                sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
-        __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
+        __paravirt_patch_apply_branch(tag, type, xen_branch_target,
+                                      ARRAY_SIZE(xen_branch_target));
 }
@@ -2017,6 +2017,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
         struct ioc3_port *port;
         struct ioc3_port *ports[PORTS_PER_CARD];
         int phys_port;
+        int cnt;
 
         DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd));
@@ -2147,6 +2148,9 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
 
         /* error exits that give back resources */
 out4:
+        for (cnt = 0; cnt < phys_port; cnt++)
+                kfree(ports[cnt]);
         kfree(card_ptr);
         return ret;
 }
...