Commit e498b943 authored by Tony Luck

Merge intel.com:/data/home/aegl/BK/Linus

into intel.com:/data/home/aegl/BK/linux-ia64-release-2.6.10
parents b3436fb3 1b59e286
@@ -521,7 +521,12 @@ get_target_cpu (unsigned int gsi, int vector)
 		goto skip_numa_setup;
 		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
+		for_each_cpu_mask(numa_cpu, cpu_mask) {
+			if (!cpu_online(numa_cpu))
+				cpu_clear(numa_cpu, cpu_mask);
+		}
 		num_cpus = cpus_weight(cpu_mask);
 		if (!num_cpus)
......
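The hunk above narrows the NUMA interrupt-target mask to online CPUs before counting candidates. Below is a minimal user-space sketch of that filtering pattern; a plain unsigned long stands in for cpumask_t, and fake_cpu_online() and mask_weight() are invented stand-ins for cpu_online() and cpus_weight(), not the kernel API.

```c
/*
 * User-space sketch of the mask filtering above: drop offline CPUs from
 * a candidate mask, then count what is left.  A plain unsigned long
 * stands in for cpumask_t; fake_cpu_online() and mask_weight() are
 * invented stand-ins for cpu_online() and cpus_weight().
 */
#include <stdio.h>

#define FAKE_NR_CPUS 8

/* Pretend only CPUs 0, 1 and 3 are online. */
static int fake_cpu_online(int cpu)
{
	return cpu == 0 || cpu == 1 || cpu == 3;
}

/* cpus_weight() equivalent: number of bits set in the mask. */
static int mask_weight(unsigned long mask)
{
	int w = 0;

	for (; mask; mask >>= 1)
		w += mask & 1;
	return w;
}

int main(void)
{
	unsigned long cpu_mask = 0xffUL;	/* all CPUs of the "node" */
	int cpu;

	/* The cpu_clear() step from the hunk: strip every offline CPU. */
	for (cpu = 0; cpu < FAKE_NR_CPUS; cpu++)
		if (!fake_cpu_online(cpu))
			cpu_mask &= ~(1UL << cpu);

	printf("usable target CPUs: %d (mask 0x%lx)\n",
	       mask_weight(cpu_mask), cpu_mask);
	return 0;
}
```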
@@ -215,11 +215,6 @@ int show_interrupts(struct seq_file *p, void *v)
 skip:
 		spin_unlock_irqrestore(&idesc->lock, flags);
 	} else if (i == NR_IRQS) {
-		seq_puts(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
-		seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
 		seq_puts(p, "LOC: ");
 		for (j = 0; j < NR_CPUS; j++)
......
@@ -3057,7 +3057,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 #endif
 		}
-		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%lx all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
+		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
 			cnum,
 			value,
 			is_loaded,
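The one-character change above swaps %lx for %x, apparently because the flags value being printed is a 32-bit quantity; a mismatched length modifier in a variadic call is undefined behaviour on a 64-bit target and is flagged by -Wformat. A small stand-alone illustration, with plain printf standing in for the perfmon DPRINT macro:

```c
/*
 * Illustration of the format-specifier fix above: printing a 32-bit
 * value with %lx is a type mismatch that -Wformat warns about and that
 * is undefined behaviour through varargs on 64-bit targets.  Plain
 * printf stands in for DPRINT here.
 */
#include <stdio.h>

int main(void)
{
	unsigned int  flags = 0x2;	/* 32-bit, like the flags value in the hunk */
	unsigned long wide  = 0x2UL;	/* 64-bit */

	printf("flags=0x%x\n", flags);	/* matches unsigned int */
	printf("wide=0x%lx\n", wide);	/* matches unsigned long */
	/* printf("flags=0x%lx\n", flags);   <-- the old form: undefined behaviour */
	return 0;
}
```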
@@ -5057,6 +5057,18 @@ pfm_handle_work(void)
 	UNPROTECT_CTX(ctx, flags);
+	/*
+	 * pfm_handle_work() is currently called with interrupts disabled.
+	 * The down_interruptible call may sleep, therefore we
+	 * must re-enable interrupts to avoid deadlocks. It is
+	 * safe to do so because this function is called ONLY
+	 * when returning to user level (PUStk=1), in which case
+	 * there is no risk of kernel stack overflow due to deep
+	 * interrupt nesting.
+	 */
+	BUG_ON(flags & IA64_PSR_I);
+	local_irq_enable();
 	DPRINT(("before block sleeping\n"));
 	/*
@@ -5067,6 +5079,12 @@ pfm_handle_work(void)
 	DPRINT(("after block sleeping ret=%d\n", ret));
+	/*
+	 * disable interrupts to restore state we had upon entering
+	 * this function
+	 */
+	local_irq_disable();
 	PROTECT_CTX(ctx, flags);
 	/*
......
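The added code above documents an ordering rule: the sleeping down_interruptible() must not run with interrupts disabled, so interrupts are enabled around the blocking wait and disabled again before the context lock is retaken. The stand-alone sketch below only models that ordering; the fake_* helpers are invented stand-ins, not the kernel's local_irq_enable()/local_irq_disable() or the perfmon semaphore.

```c
/*
 * Stand-alone sketch of the ordering described in the comment above:
 * the function is entered with interrupts disabled, must not sleep in
 * that state, so it re-enables interrupts, does the blocking wait, and
 * disables interrupts again before retaking the context lock.  The
 * fake_* helpers only model that ordering; they are not kernel APIs.
 */
#include <stdio.h>

static int irqs_enabled;

static void fake_local_irq_enable(void)  { irqs_enabled = 1; }
static void fake_local_irq_disable(void) { irqs_enabled = 0; }

/* Stand-in for the sleeping down_interruptible() on the context semaphore. */
static void fake_blocking_wait(void)
{
	if (!irqs_enabled) {
		printf("bug: about to sleep with interrupts disabled\n");
		return;
	}
	printf("sleeping until the blocked overflow is handled...\n");
}

int main(void)
{
	fake_local_irq_disable();	/* entered with interrupts off	  */

	fake_local_irq_enable();	/* re-enable before we may sleep  */
	fake_blocking_wait();		/* down_interruptible() stand-in  */
	fake_local_irq_disable();	/* restore the state we came with */

	/* ...PROTECT_CTX() would retake the context lock here... */
	return 0;
}
```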
@@ -20,14 +20,7 @@
 #define __ARCH_IRQ_STAT	1
-#define softirq_pending(cpu)		(cpu_data(cpu)->softirq_pending)
-#define syscall_count(cpu)		/* unused on IA-64 */
-#define ksoftirqd_task(cpu)		(cpu_data(cpu)->ksoftirqd)
-#define nmi_count(cpu)			0
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
-#define local_syscall_count()		/* unused on IA-64 */
-#define local_ksoftirqd_task()		(local_cpu_data->ksoftirqd)
-#define local_nmi_count()		0
 #define HARDIRQ_BITS	14
......
@@ -1531,7 +1531,7 @@ static inline s64
 ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type,(u64)__pa(tr_buffer));
+	PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type,(u64)ia64_tpa(tr_buffer));
 	if (tr_valid)
 		tr_valid->piv_val = iprv.v0;
 	return iprv.status;
......
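The last hunk replaces __pa() with ia64_tpa() when handing PAL the buffer's physical address. __pa() is plain arithmetic that is only valid for identity-mapped kernel addresses, while ia64_tpa() asks the CPU for the actual translation and so also works for addresses outside that region. The toy model below illustrates just that distinction; every constant and helper in it is invented for illustration.

```c
/*
 * Toy model of the difference behind the hunk above: an offset-style
 * __pa() conversion is pure arithmetic and is only valid inside the
 * identity-mapped window, while a tpa-style translation (what
 * ia64_tpa() obtains from the CPU) is a real lookup that works for any
 * mapped address.  Every constant and helper here is invented.
 */
#include <stdio.h>
#include <stdint.h>

#define FAKE_DIRECT_MAP_BASE 0xe000000000000000ULL	/* invented window start */
#define FAKE_DIRECT_MAP_SIZE 0x0000000100000000ULL	/* invented window size  */

/* __pa()-style: subtract the window base; meaningless outside the window. */
static uint64_t fake_pa(uint64_t virt)
{
	return virt - FAKE_DIRECT_MAP_BASE;
}

static int in_direct_map(uint64_t virt)
{
	return virt >= FAKE_DIRECT_MAP_BASE &&
	       virt <  FAKE_DIRECT_MAP_BASE + FAKE_DIRECT_MAP_SIZE;
}

int main(void)
{
	uint64_t direct = FAKE_DIRECT_MAP_BASE + 0x1000;	/* inside window  */
	uint64_t other  = 0xa000000000002000ULL;		/* outside window */

	printf("direct-map buffer -> phys 0x%llx (arithmetic is valid)\n",
	       (unsigned long long)fake_pa(direct));

	if (!in_direct_map(other))
		printf("other buffer: arithmetic would lie, a real translation is needed\n");
	return 0;
}
```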