Commit a8e498b7 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/qe_ic: Fix another breakage from the irq_data conversion
  powerpc/8xx: Fix another breakage from the irq_data conversion
  powerpc/cell: Use handle_edge_eoi_irq for real
  powerpc/pseries: Enable Chelsio network and iWARP drivers
  powerpc/mm: Move the STAB0 location to 0x8000 to make room in low memory
  powerpc: Fix accounting of softirq time when idle
  powerpc/pseries/smp: query-cpu-stopped-state support won't change
  powerpc/xics: Use hwirq for xics domain irq number
  powerpc/xics: Fix numberspace mismatch from irq_desc conversion
  powerpc: Wire up new syscalls
  powerpc/booke: Correct the SPRN_MAS5 definition.
  powerpc: ARCH_PFN_OFFSET should be unsigned long
  powerpc: Implement dma_mmap_coherent()
  powerpc/nvram: Don't overwrite oops/panic report on normal shutdown
  powerpc: Restore some misc devices to our configs
parents a8a44921 d4db35e8
......@@ -47,6 +47,7 @@ CONFIG_MTD_NAND_NDFC=y
CONFIG_MTD_UBI=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
......
......@@ -43,6 +43,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
......
......@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
......
......@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
......
......@@ -138,6 +138,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
......
......@@ -63,6 +63,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
......
......@@ -32,6 +32,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
......
......@@ -78,6 +78,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
......
......@@ -61,6 +61,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_XIP=y
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
......
......@@ -52,6 +52,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
......
......@@ -82,6 +82,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
......
......@@ -84,6 +84,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
......
......@@ -66,6 +66,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
......
......@@ -59,6 +59,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
......
......@@ -398,6 +398,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_CDROM_PKTCDVD=m
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_HD=y
CONFIG_MISC_DEVICES=y
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_SENSORS_TSL2550=m
CONFIG_EEPROM_AT24=m
......
......@@ -189,6 +189,7 @@ CONFIG_TIGON3=y
CONFIG_BNX2=m
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T3=m
CONFIG_CHELSIO_T4=m
CONFIG_EHEA=y
CONFIG_IXGBE=m
CONFIG_IXGB=m
......@@ -255,6 +256,8 @@ CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
CONFIG_INFINIBAND_EHCA=m
CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
......
......@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
......@@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t);
#define ARCH_HAS_DMA_MMAP_COHERENT
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
......
......@@ -27,7 +27,7 @@
#define STE_VSID_SHIFT 12
/* Location of cpu0's segment table */
#define STAB0_PAGE 0x6
#define STAB0_PAGE 0x8
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
......
......@@ -100,7 +100,7 @@ extern phys_addr_t kernstart_addr;
#endif
#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
......
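Background on the one-line ARCH_PFN_OFFSET change above: MEMORY_START is built from phys_addr_t quantities, which can be 64-bit even on a 32-bit kernel, so without the cast every comparison against ARCH_PFN_OFFSET (as in pfn_valid()) is promoted to 64-bit arithmetic. A stand-alone illustration of the integer promotion the cast avoids; the typedef and constants below are stand-ins for the kernel definitions, not kernel code:

/* Illustration only: shows how the shift result keeps the wide type
 * unless it is explicitly cast back to unsigned long. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;            /* stand-in for the kernel type */
#define PAGE_SHIFT   12
#define MEMORY_START ((phys_addr_t)0)

int main(void)
{
        /* Uncast: the expression stays 64-bit (8 bytes everywhere). */
        printf("uncast: %zu bytes\n", sizeof(MEMORY_START >> PAGE_SHIFT));
        /* Cast: native word size (4 bytes on a 32-bit build). */
        printf("cast:   %zu bytes\n",
               sizeof((unsigned long)(MEMORY_START >> PAGE_SHIFT)));
        return 0;
}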
......@@ -81,7 +81,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
struct irq_desc *desc)
{
struct qe_ic *qe_ic = get_irq_desc_data(desc);
struct qe_ic *qe_ic = irq_desc_get_chip_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != NO_IRQ)
......@@ -91,7 +91,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
static inline void qe_ic_cascade_high_ipic(unsigned int irq,
struct irq_desc *desc)
{
struct qe_ic *qe_ic = get_irq_desc_data(desc);
struct qe_ic *qe_ic = irq_desc_get_chip_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq != NO_IRQ)
......@@ -101,9 +101,9 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
static inline void qe_ic_cascade_low_mpic(unsigned int irq,
struct irq_desc *desc)
{
struct qe_ic *qe_ic = get_irq_desc_data(desc);
struct qe_ic *qe_ic = irq_desc_get_chip_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
struct irq_chip *chip = get_irq_desc_chip(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
......@@ -114,9 +114,9 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
static inline void qe_ic_cascade_high_mpic(unsigned int irq,
struct irq_desc *desc)
{
struct qe_ic *qe_ic = get_irq_desc_data(desc);
struct qe_ic *qe_ic = irq_desc_get_chip_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
struct irq_chip *chip = get_irq_desc_chip(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
......@@ -127,9 +127,9 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
struct irq_desc *desc)
{
struct qe_ic *qe_ic = get_irq_desc_data(desc);
struct qe_ic *qe_ic = irq_desc_get_chip_data(desc);
unsigned int cascade_irq;
struct irq_chip *chip = get_irq_desc_chip(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq == NO_IRQ)
......
......@@ -110,7 +110,7 @@
#define SPRN_MAS2 0x272 /* MMU Assist Register 2 */
#define SPRN_MAS3 0x273 /* MMU Assist Register 3 */
#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */
#define SPRN_MAS5 0x153 /* MMU Assist Register 5 */
#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
#define SPRN_PID1 0x279 /* Process ID Register 1 */
#define SPRN_PID2 0x27A /* Process ID Register 2 */
......
......@@ -348,3 +348,7 @@ COMPAT_SYS_SPU(sendmsg)
COMPAT_SYS_SPU(recvmsg)
COMPAT_SYS_SPU(recvmmsg)
SYSCALL_SPU(accept4)
SYSCALL_SPU(name_to_handle_at)
COMPAT_SYS_SPU(open_by_handle_at)
COMPAT_SYS_SPU(clock_adjtime)
SYSCALL_SPU(syncfs)
......@@ -367,10 +367,14 @@
#define __NR_recvmsg 342
#define __NR_recvmmsg 343
#define __NR_accept4 344
#define __NR_name_to_handle_at 345
#define __NR_open_by_handle_at 346
#define __NR_clock_adjtime 347
#define __NR_syncfs 348
#ifdef __KERNEL__
#define __NR_syscalls 345
#define __NR_syscalls 349
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
......
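The four syscall numbers wired up above are callable from user space as soon as the kernel is rebuilt; until a libc wrapper exists they would typically be invoked through syscall(2). A minimal sketch for syncfs follows; the literal 348 is the 32-bit powerpc number added in this hunk, and __NR_syncfs from installed kernel headers should be preferred when available:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_syncfs
#define __NR_syncfs 348          /* powerpc number wired in this series */
#endif

int main(int argc, char **argv)
{
        int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Flush only the filesystem containing fd, not every mount. */
        if (syscall(__NR_syncfs, fd) < 0)
                perror("syncfs");
        close(fd);
        return 0;
}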
......@@ -179,3 +179,21 @@ static int __init dma_init(void)
return 0;
}
fs_initcall(dma_init);
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size)
{
unsigned long pfn;
#ifdef CONFIG_NOT_COHERENT_CACHE
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
return remap_pfn_range(vma, vma->vm_start,
pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);
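The new dma_mmap_coherent() helper above lets a driver map a coherent DMA buffer into user space from its mmap file operation, using the non-coherent-cache PFN lookup when needed. A hypothetical sketch of a caller; the foo_* names and the dev/vaddr/dma_handle/size fields are illustrative only and not part of this commit:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct foo_dev {                        /* hypothetical driver state */
        struct device *dev;
        void *vaddr;                    /* from dma_alloc_coherent() */
        dma_addr_t dma_handle;
        size_t size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct foo_dev *foo = file->private_data;

        if (vma->vm_end - vma->vm_start > foo->size)
                return -EINVAL;

        /* Let the arch helper pick the right pfn and page protection. */
        return dma_mmap_coherent(foo->dev, vma, foo->vaddr,
                                 foo->dma_handle, foo->size);
}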
......@@ -977,20 +977,6 @@ _GLOBAL(do_stab_bolted)
rfid
b . /* prevent speculative execution */
/*
* Space for CPU0's segment table.
*
* On iSeries, the hypervisor must fill in at least one entry before
* we get control (with relocate on). The address is given to the hv
* as a page number (see xLparMap below), so this must be at a
* fixed address (the linker can't compute (u64)&initial_stab >>
* PAGE_SHIFT).
*/
. = STAB0_OFFSET /* 0x6000 */
.globl initial_stab
initial_stab:
.space 4096
#ifdef CONFIG_PPC_PSERIES
/*
* Data area reserved for FWNMI option.
......@@ -1027,3 +1013,17 @@ xLparMap:
#ifdef CONFIG_PPC_PSERIES
. = 0x8000
#endif /* CONFIG_PPC_PSERIES */
/*
* Space for CPU0's segment table.
*
* On iSeries, the hypervisor must fill in at least one entry before
* we get control (with relocate on). The address is given to the hv
* as a page number (see xLparMap above), so this must be at a
* fixed address (the linker can't compute (u64)&initial_stab >>
* PAGE_SHIFT).
*/
. = STAB0_OFFSET /* 0x8000 */
.globl initial_stab
initial_stab:
.space 4096
......@@ -356,7 +356,7 @@ void account_system_vtime(struct task_struct *tsk)
}
get_paca()->user_time_scaled += user_scaled;
if (in_irq() || idle_task(smp_processor_id()) != tsk) {
if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
account_system_time(tsk, 0, delta, sys_scaled);
if (stolen)
account_steal_time(stolen);
......
......@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
/*
* Return the PFN for a given cpu virtual address returned by
* __dma_alloc_coherent. This is used by dma_mmap_coherent()
*/
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
/* This should always be populated, so we don't test every
* level. If that fails, we'll have a nice crash which
* will be as good as a BUG_ON()
*/
pgd_t *pgd = pgd_offset_k(cpu_addr);
pud_t *pud = pud_offset(pgd, cpu_addr);
pmd_t *pmd = pmd_offset(pud, cpu_addr);
pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
if (pte_none(*ptep) || !pte_present(*ptep))
return 0;
return pte_pfn(*ptep);
}
......@@ -480,8 +480,32 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
const char *new_msgs, unsigned long new_len)
{
static unsigned int oops_count = 0;
static bool panicking = false;
size_t text_len;
switch (reason) {
case KMSG_DUMP_RESTART:
case KMSG_DUMP_HALT:
case KMSG_DUMP_POWEROFF:
/* These are almost always orderly shutdowns. */
return;
case KMSG_DUMP_OOPS:
case KMSG_DUMP_KEXEC:
break;
case KMSG_DUMP_PANIC:
panicking = true;
break;
case KMSG_DUMP_EMERG:
if (panicking)
/* Panic report already captured. */
return;
break;
default:
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
__FUNCTION__, (int) reason);
return;
}
if (clobbering_unread_rtas_event())
return;
......
......@@ -64,8 +64,8 @@ int smp_query_cpu_stopped(unsigned int pcpu)
int qcss_tok = rtas_token("query-cpu-stopped-state");
if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "Firmware doesn't support "
"query-cpu-stopped-state\n");
printk_once(KERN_INFO
"Firmware doesn't support query-cpu-stopped-state\n");
return QCSS_HARDWARE_ERROR;
}
......
......@@ -204,33 +204,33 @@ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
static void xics_unmask_irq(struct irq_data *d)
{
unsigned int irq;
unsigned int hwirq;
int call_status;
int server;
pr_devel("xics: unmask virq %d\n", d->irq);
irq = (unsigned int)irq_map[d->irq].hwirq;
pr_devel(" -> map to hwirq 0x%x\n", irq);
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
hwirq = (unsigned int)irq_map[d->irq].hwirq;
pr_devel(" -> map to hwirq 0x%x\n", hwirq);
if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
return;
server = get_irq_server(d->irq, d->affinity, 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
printk(KERN_ERR
"%s: ibm_set_xive irq %u server %x returned %d\n",
__func__, irq, server, call_status);
__func__, hwirq, server, call_status);
return;
}
/* Now unmask the interrupt (often a no-op) */
call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
__func__, irq, call_status);
__func__, hwirq, call_status);
return;
}
}
......@@ -250,46 +250,46 @@ static unsigned int xics_startup(struct irq_data *d)
return 0;
}
static void xics_mask_real_irq(struct irq_data *d)
static void xics_mask_real_irq(unsigned int hwirq)
{
int call_status;
if (d->irq == XICS_IPI)
if (hwirq == XICS_IPI)
return;
call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq);
call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
__func__, d->irq, call_status);
__func__, hwirq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq,
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq,
default_server, 0xff);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
__func__, d->irq, call_status);
__func__, hwirq, call_status);
return;
}
}
static void xics_mask_irq(struct irq_data *d)
{
unsigned int irq;
unsigned int hwirq;
pr_devel("xics: mask virq %d\n", d->irq);
irq = (unsigned int)irq_map[d->irq].hwirq;
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
hwirq = (unsigned int)irq_map[d->irq].hwirq;
if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
return;
xics_mask_real_irq(d);
xics_mask_real_irq(hwirq);
}
static void xics_mask_unknown_vec(unsigned int vec)
{
printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
xics_mask_real_irq(irq_get_irq_data(vec));
xics_mask_real_irq(vec);
}
static inline unsigned int xics_xirr_vector(unsigned int xirr)
......@@ -373,37 +373,37 @@ static unsigned char pop_cppr(void)
static void xics_eoi_direct(struct irq_data *d)
{
unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
iosync();
direct_xirr_info_set((pop_cppr() << 24) | irq);
direct_xirr_info_set((pop_cppr() << 24) | hwirq);
}
static void xics_eoi_lpar(struct irq_data *d)
{
unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
iosync();
lpar_xirr_info_set((pop_cppr() << 24) | irq);
lpar_xirr_info_set((pop_cppr() << 24) | hwirq);
}
static int
xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
{
unsigned int irq;
unsigned int hwirq;
int status;
int xics_status[2];
int irq_server;
irq = (unsigned int)irq_map[d->irq].hwirq;
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
hwirq = (unsigned int)irq_map[d->irq].hwirq;
if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
return -1;
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
if (status) {
printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
__func__, irq, status);
__func__, hwirq, status);
return -1;
}
......@@ -418,11 +418,11 @@ xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
}
status = rtas_call(ibm_set_xive, 3, 1, NULL,
irq, irq_server, xics_status[1]);
hwirq, irq_server, xics_status[1]);
if (status) {
printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
__func__, irq, status);
__func__, hwirq, status);
return -1;
}
......@@ -874,7 +874,7 @@ void xics_kexec_teardown_cpu(int secondary)
void xics_migrate_irqs_away(void)
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
unsigned int irq, virq;
int virq;
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == default_server)
......@@ -892,6 +892,7 @@ void xics_migrate_irqs_away(void)
for_each_irq(virq) {
struct irq_desc *desc;
struct irq_chip *chip;
unsigned int hwirq;
int xics_status[2];
int status;
unsigned long flags;
......@@ -901,9 +902,9 @@ void xics_migrate_irqs_away(void)
continue;
if (irq_map[virq].host != xics_host)
continue;
irq = (unsigned int)irq_map[virq].hwirq;
hwirq = (unsigned int)irq_map[virq].hwirq;
/* We need to get IPIs still. */
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
continue;
desc = irq_to_desc(virq);
......@@ -918,10 +919,10 @@ void xics_migrate_irqs_away(void)
raw_spin_lock_irqsave(&desc->lock, flags);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
if (status) {
printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
__func__, irq, status);
__func__, hwirq, status);
goto unlock;
}
......
......@@ -80,7 +80,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
if ((hw & 1) == 0) {
siel |= (0x80000000 >> hw);
out_be32(&siu_reg->sc_siel, siel);
__irq_set_handler_locked(irq, handle_edge_irq);
__irq_set_handler_locked(d->irq, handle_edge_irq);
}
}
return 0;
......