Commit b04d0a90 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6: (33 commits)
  sparc32: Fix might-be-used-uninitialized warning in do_sparc_fault().
  sparc: Fix .size directive for do_int_load
  sparc64: Fix build errors with gcc-4.6.0
  sparc32,sun4m: percpu and global register definitions moved to irq.h
  sparc32: introduce build_device_irq
  sparc32: introduce sparc_irq_config
  sparc32: fix build with leon or floppy enabled
  sparc: convert to clocksource_register_hz/khz
  sparc64: Sharpen address space randomization calculations.
  sparc32: irq_32.c cleanup
  sparc32, sun4d: add comment in empty statement in sun4d_request_irq()
  sparc32,sun4d: drop unused code in sun4d_distribute_irqs()
  sparc32,sun4d: irq, smp files cleanup
  sparc32,sun4m: irq, smp files cleanup
  sparc32,sun4c: irq file cleanup
  sparc32: add irq + smp declarations to headers
  sparc32: remove tick14.c
  sparc32/leon: FPU-FSR only available when FPU present
  SPARC/LEON: power down instruction different of different LEONs
  sparc32: added U-Boot build target: uImage
  ...
parents 054cfaac c816be7b
......@@ -51,6 +51,7 @@ config SPARC64
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO_DEPRECATED
config ARCH_DEFCONFIG
string
......@@ -460,6 +461,39 @@ config SPARC_LEON
from www.gaisler.com. You can download a sparc-linux cross-compilation
toolchain at www.gaisler.com.
if SPARC_LEON
menu "U-Boot options"
config UBOOT_LOAD_ADDR
hex "uImage Load Address"
default 0x40004000
---help---
U-Boot kernel load address, the address in physical address space
where u-boot will place the Linux kernel before booting it.
This address is normally the base address of main memory + 0x4000.
config UBOOT_FLASH_ADDR
hex "uImage.o Load Address"
default 0x00080000
---help---
Optional setting only affecting the uImage.o ELF-image used to
download the uImage file to the target using an ELF-loader other than
U-Boot. It may for example be used to download a uImage to FLASH with
the GRMON utility before even starting u-boot.
config UBOOT_ENTRY_ADDR
hex "uImage Entry Address"
default 0xf0004000
---help---
Do not change this unless you know what you're doing. This is
hardcoded by the SPARC32 and LEON port.
This is the virtual address u-boot jumps to when booting the Linux
Kernel.
endmenu
endif
endmenu
menu "Bus options (PCI etc.)"
......
......@@ -88,7 +88,7 @@ boot := arch/sparc/boot
# Default target
all: zImage
image zImage tftpboot.img vmlinux.aout: vmlinux
image zImage uImage tftpboot.img vmlinux.aout: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean:
......@@ -102,6 +102,7 @@ ifeq ($(ARCH),sparc)
define archhelp
echo '* image - kernel image ($(boot)/image)'
echo '* zImage - stripped kernel image ($(boot)/zImage)'
echo ' uImage - U-Boot SPARC32 Image (only for LEON)'
echo ' tftpboot.img - image prepared for tftp'
endef
else
......
......@@ -5,6 +5,7 @@
ROOT_IMG := /usr/src/root.img
ELFTOAOUT := elftoaout
MKIMAGE := $(srctree)/scripts/mkuboot.sh
hostprogs-y := piggyback btfixupprep
targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
......@@ -77,6 +78,36 @@ $(obj)/zImage: $(obj)/image
$(obj)/vmlinux.aout: vmlinux FORCE
$(call if_changed,elftoaout)
@echo ' kernel: $@ is ready'
else
# The following lines make a readable image for U-Boot.
# uImage - Binary file read by U-boot
# uImage.o - object file of uImage for loading with a
# flash programmer understanding ELF.
OBJCOPYFLAGS_image.bin := -S -O binary -R .note -R .comment
$(obj)/image.bin: $(obj)/image FORCE
$(call if_changed,objcopy)
$(obj)/image.gz: $(obj)/image.bin
$(call if_changed,gzip)
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sparc -O linux -T kernel \
-C gzip -a $(CONFIG_UBOOT_LOAD_ADDR) \
-e $(CONFIG_UBOOT_ENTRY_ADDR) -n 'Linux-$(KERNELRELEASE)' \
-d $< $@
quiet_cmd_uimage.o = UIMAGE.O $@
cmd_uimage.o = $(LD) -Tdata $(CONFIG_UBOOT_FLASH_ADDR) \
-r -b binary $@ -o $@.o
targets += uImage
$(obj)/uImage: $(obj)/image.gz
$(call if_changed,uimage)
$(call if_changed,uimage.o)
@echo ' Image $@ is ready'
endif
$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
......
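The new uImage rule above wraps the gzipped kernel in U-Boot's 64-byte legacy image header; the mkimage flags map directly onto its fields. A sketch of that header follows, purely for illustration and not part of the diff (field names follow U-Boot's image.h; all fields are stored big-endian):

#include <stdint.h>

struct uimage_header {
	uint32_t ih_magic;	/* 0x27051956				*/
	uint32_t ih_hcrc;	/* header CRC				*/
	uint32_t ih_time;	/* image creation timestamp		*/
	uint32_t ih_size;	/* payload (image.gz) size		*/
	uint32_t ih_load;	/* -a $(CONFIG_UBOOT_LOAD_ADDR)		*/
	uint32_t ih_ep;		/* -e $(CONFIG_UBOOT_ENTRY_ADDR)	*/
	uint32_t ih_dcrc;	/* payload CRC				*/
	uint8_t  ih_os;		/* -O linux				*/
	uint8_t  ih_arch;	/* -A sparc				*/
	uint8_t  ih_type;	/* -T kernel				*/
	uint8_t  ih_comp;	/* -C gzip				*/
	uint8_t  ih_name[32];	/* -n 'Linux-$(KERNELRELEASE)'		*/
};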
......@@ -33,34 +33,34 @@
/* The largest number of unique interrupt sources we support.
* If this needs to ever be larger than 255, you need to change
* the type of ino_bucket->virt_irq as appropriate.
* the type of ino_bucket->irq as appropriate.
*
* ino_bucket->virt_irq allocation is made during {sun4v_,}build_irq().
* ino_bucket->irq allocation is made during {sun4v_,}build_irq().
*/
#define NR_IRQS 255
extern void irq_install_pre_handler(int virt_irq,
extern void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2);
#define irq_canonicalize(irq) (irq)
extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end);
extern void sun4v_destroy_msi(unsigned int virt_irq);
extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
extern void sun4v_destroy_msi(unsigned int irq);
extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end,
unsigned long imap_base,
unsigned long iclr_base);
extern void sun4u_destroy_msi(unsigned int virt_irq);
extern void sun4u_destroy_msi(unsigned int irq);
extern unsigned char virt_irq_alloc(unsigned int dev_handle,
extern unsigned char irq_alloc(unsigned int dev_handle,
unsigned int dev_ino);
#ifdef CONFIG_PCI_MSI
extern void virt_irq_free(unsigned int virt_irq);
extern void irq_free(unsigned int irq);
#endif
extern void __init init_IRQ(void);
......
......@@ -375,9 +375,6 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
extern unsigned int real_irq_entry[], smpleon_ticker[];
extern unsigned int patchme_maybe_smp_msg[];
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
extern unsigned long trapbase_cpu3[];
extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
extern unsigned int linux_trap_ipi15_sun4m[];
......
......@@ -180,6 +180,7 @@ struct amba_ahb_device {
struct device_node;
void _amba_init(struct device_node *dp, struct device_node ***nextp);
extern unsigned long amba_system_id;
extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs;
extern struct leon3_gptimer_regs_map *leon3_gptimer_regs;
extern struct amba_apb_device leon_percpu_timer_dev[16];
......@@ -254,6 +255,11 @@ extern unsigned int sparc_leon_eirq;
#define GAISLER_L2C 0xffe /* internal device: leon2compat */
#define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play configarea */
/* Chip IDs */
#define AEROFLEX_UT699 0x0699
#define LEON4_NEXTREME1 0x0102
#define GAISLER_GR712RC 0x0712
#define amba_vendor(x) (((x) >> 24) & 0xff)
#define amba_device(x) (((x) >> 12) & 0xfff)
......
......@@ -4,4 +4,7 @@
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
/* mm/srmmu.c */
extern ctxd_t *srmmu_ctx_table_phys;
#endif
......@@ -29,10 +29,16 @@
*/
extern unsigned char boot_cpu_id;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
extern struct linux_prom_registers smp_penguin_ctable;
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
void cpu_panic(void);
extern void smp4m_irq_rotate(int cpu);
/*
* General functions that each host system must provide.
*/
......
......@@ -42,7 +42,6 @@ obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
obj-$(CONFIG_SPARC32) += devices.o
obj-$(CONFIG_SPARC32) += tadpole.o
obj-$(CONFIG_SPARC32) += tick14.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
obj-y += una_asm_$(BITS).o
......@@ -54,6 +53,7 @@ obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
......
......@@ -324,7 +324,7 @@ void __cpuinit cpu_probe(void)
psr = get_psr();
put_psr(psr | PSR_EF);
#ifdef CONFIG_SPARC_LEON
fpu_vers = 7;
fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
#else
fpu_vers = ((get_fsr() >> 17) & 0x7);
#endif
......
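The LEON hunk above replaces the hardcoded fpu_vers = 7 with a probe. A minimal sketch of that probe, not part of the diff (the helper name is hypothetical; PSR_EF, get_psr(), put_psr() and get_fsr() are the accessors already used in cpu.c):

static int example_leon_fpu_version(void)
{
	/* Request FPU enable; on an FPU-less LEON the EF bit will not stick. */
	put_psr(get_psr() | PSR_EF);

	if (get_psr() & PSR_EF)
		return (get_fsr() >> 17) & 0x7;	/* FSR "ver" field, bits 19:17 */
	return 7;				/* convention for "no FPU"     */
}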
......@@ -213,8 +213,8 @@ extern struct cheetah_err_info *cheetah_error_log;
struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;
/* Virtual interrupt number assigned to this INO. */
/*0x08*/unsigned int __virt_irq;
/* Interrupt number assigned to this INO. */
/*0x08*/unsigned int __irq;
/*0x0c*/unsigned int __pad;
};
......
......@@ -333,13 +333,10 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma)
{
struct iommu *iommu;
iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
iopte = iommu->page_table +
((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
......
......@@ -50,10 +50,14 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
#ifdef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
#else
#ifndef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
#else
static inline void mmu_inval_dma_area(void *va, unsigned long len)
{
if (!sparc_leon3_snooping_enabled())
leon_flush_dcache_all();
}
#endif
static struct resource *_sparc_find_resource(struct resource *r,
......@@ -254,7 +258,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *dma_addrp, gfp_t gfp)
{
struct platform_device *op = to_platform_device(dev);
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
unsigned long len_total = PAGE_ALIGN(len);
unsigned long va;
struct resource *res;
int order;
......@@ -280,7 +284,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
goto err_nova;
}
mmu_inval_dma_area(va, len_total);
mmu_inval_dma_area((void *)va, len_total);
// XXX The mmu_map_dma_area does this for us below, see comments.
// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
/*
......@@ -297,9 +302,9 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
err_noiommu:
release_resource(res);
err_nova:
free_pages(va, order);
err_nomem:
kfree(res);
err_nomem:
free_pages(va, order);
err_nopages:
return NULL;
}
......@@ -321,7 +326,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
return;
}
n = (n + PAGE_SIZE-1) & PAGE_MASK;
n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
(long)((res->end-res->start)+1), n);
......@@ -408,9 +413,6 @@ struct dma_map_ops sbus_dma_ops = {
.sync_sg_for_device = sbus_sync_sg_for_device,
};
struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
......@@ -422,7 +424,9 @@ arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
#ifdef CONFIG_PCI
/* LEON reuses PCI DMA ops */
#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
......@@ -430,8 +434,8 @@ arch_initcall(sparc_register_ioport);
static void *pci32_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *pba, gfp_t gfp)
{
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
unsigned long va;
unsigned long len_total = PAGE_ALIGN(len);
void *va;
struct resource *res;
int order;
......@@ -443,34 +447,34 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
}
order = get_order(len_total);
va = __get_free_pages(GFP_KERNEL, order);
if (va == 0) {
va = (void *) __get_free_pages(GFP_KERNEL, order);
if (va == NULL) {
printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
return NULL;
goto err_nopages;
}
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
free_pages(va, order);
printk("pci_alloc_consistent: no core\n");
return NULL;
goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
free_pages(va, order);
kfree(res);
return NULL;
goto err_nova;
}
mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
(long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
err_nova:
kfree(res);
err_nomem:
free_pages((unsigned long)va, order);
err_nopages:
return NULL;
}
/* Free and unmap a consistent DMA buffer.
......@@ -485,7 +489,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
dma_addr_t ba)
{
struct resource *res;
unsigned long pgp;
void *pgp;
if ((res = _sparc_find_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
......@@ -498,21 +502,21 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
return;
}
n = (n + PAGE_SIZE-1) & PAGE_MASK;
n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
(long)((res->end-res->start)+1), (long)n);
return;
}
pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
pgp = phys_to_virt(ba); /* bus_to_virt actually */
mmu_inval_dma_area(pgp, n);
sparc_unmapiorange((unsigned long)p, n);
release_resource(res);
kfree(res);
free_pages(pgp, get_order(n));
free_pages((unsigned long)pgp, get_order(n));
}
/*
......@@ -527,6 +531,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
return page_to_phys(page) + offset;
}
static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
if (dir != PCI_DMA_TODEVICE)
mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
}
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
......@@ -572,9 +583,8 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
mmu_inval_dma_area(page_address(sg_page(sg)),
PAGE_ALIGN(sg->length));
}
}
}
......@@ -593,8 +603,8 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
mmu_inval_dma_area(phys_to_virt(ba),
PAGE_ALIGN(size));
}
}
......@@ -602,8 +612,8 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
mmu_inval_dma_area(phys_to_virt(ba),
PAGE_ALIGN(size));
}
}
......@@ -622,9 +632,8 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
mmu_inval_dma_area(page_address(sg_page(sg)),
PAGE_ALIGN(sg->length));
}
}
}
......@@ -638,9 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
mmu_inval_dma_area(page_address(sg_page(sg)),
PAGE_ALIGN(sg->length));
}
}
}
......@@ -649,6 +657,7 @@ struct dma_map_ops pci32_dma_ops = {
.alloc_coherent = pci32_alloc_coherent,
.free_coherent = pci32_free_coherent,
.map_page = pci32_map_page,
.unmap_page = pci32_unmap_page,
.map_sg = pci32_map_sg,
.unmap_sg = pci32_unmap_sg,
.sync_single_for_cpu = pci32_sync_single_for_cpu,
......@@ -658,7 +667,16 @@ struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
#endif /* CONFIG_PCI */
#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
#ifdef CONFIG_SPARC_LEON
struct dma_map_ops *dma_ops = &pci32_dma_ops;
#elif defined(CONFIG_SBUS)
struct dma_map_ops *dma_ops = &sbus_dma_ops;
#endif
EXPORT_SYMBOL(dma_ops);
/*
* Return whether the given PCI device DMA address mask can be
......
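With pci32_dma_ops now installed as the LEON dma_ops and the new unmap_page hook in place, a streaming DMA unmap invalidates the CPU's cached view of the buffer on cores without snooping. A hedged usage sketch, not part of the diff (the driver-side function is hypothetical; dma_unmap_page() is the generic DMA API entry point):

static void example_rx_complete(struct device *dev, dma_addr_t ba)
{
	/* dma_unmap_page() -> dma_ops->unmap_page == pci32_unmap_page()
	 *  -> mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(PAGE_SIZE))
	 *  -> leon_flush_dcache_all(), unless sparc_leon3_snooping_enabled()
	 */
	dma_unmap_page(dev, ba, PAGE_SIZE, DMA_FROM_DEVICE);
}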
#include <linux/platform_device.h>
#include <asm/btfixup.h>
/* sun4m specific type definitions */
/* This maps direct to CPU specific interrupt registers */
struct sun4m_irq_percpu {
u32 pending;
u32 clear;
u32 set;
};
/* This maps direct to global interrupt registers */
struct sun4m_irq_global {
u32 pending;
u32 mask;
u32 mask_clear;
u32 mask_set;
u32 interrupt_target;
};
extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
extern struct sun4m_irq_global __iomem *sun4m_irq_global;
/*
* Platform specific irq configuration
* The individual platforms assign their platform
* specifics in their init functions.
*/
struct sparc_irq_config {
void (*init_timers)(irq_handler_t);
unsigned int (*build_device_irq)(struct platform_device *op,
unsigned int real_irq);
};
extern struct sparc_irq_config sparc_irq_config;
/* Dave Redman (djhr@tadpole.co.uk)
* changed these to function pointers.. it saves cycles and will allow
* the irq dependencies to be split into different files at a later date
......@@ -45,12 +81,6 @@ static inline void load_profile_irq(int cpu, int limit)
BTFIXUP_CALL(load_profile_irq)(cpu, limit);
}
extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
extern void claim_ticker14(irq_handler_t irq_handler,
int irq,
unsigned int timeout);
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
......
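The new sparc_irq_config structure above replaces ad-hoc function pointers such as sparc_init_timers. A sketch of how a platform is expected to hook into it, not part of the diff (the example_* names are hypothetical; LEON does exactly this later in the series via leon_init_IRQ()):

static void example_init_timers(irq_handler_t counter_fn)
{
	/* program the platform timer and install counter_fn on level 10 */
}

static unsigned int example_build_device_irq(struct platform_device *op,
					     unsigned int real_irq)
{
	return real_irq;	/* identity mapping, as in the irq_32.c default */
}

void __init example_init_IRQ(void)
{
	sparc_irq_config.init_timers      = example_init_timers;
	sparc_irq_config.build_device_irq = example_build_device_irq;
}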
/*
* arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
* Interrupt request handling routines. On the
* Sparc the IRQs are basically 'cast in stone'
* and you are supposed to probe the prom's device
* node trees to find out who's got which IRQ.
......@@ -11,40 +11,11 @@
* Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pcic.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include <asm/pcic.h>
#include <asm/leon.h>
#include "kernel.h"
......@@ -57,6 +28,10 @@
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */
/* platform specific irq setup */
struct sparc_irq_config sparc_irq_config;
unsigned long arch_local_irq_save(void)
{
unsigned long retval;
......@@ -128,15 +103,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
*
*/
static void irq_panic(void)
{
extern char *cputypval;
prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
prom_halt();
}
void (*sparc_init_timers)(irq_handler_t ) =
(void (*)(irq_handler_t )) irq_panic;
/*
* Dave Redman (djhr@tadpole.co.uk)
......@@ -166,18 +133,16 @@ DEFINE_SPINLOCK(irq_action_lock);
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v;
struct irqaction * action;
int i = *(loff_t *)v;
struct irqaction *action;
unsigned long flags;
#ifdef CONFIG_SMP
int j;
#endif
if (sparc_cpu_model == sun4d) {
extern int show_sun4d_interrupts(struct seq_file *, void *);
if (sparc_cpu_model == sun4d)
return show_sun4d_interrupts(p, v);
}
spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
action = sparc_irq[i].action;
......@@ -195,7 +160,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, " %c %s",
(action->flags & IRQF_DISABLED) ? '+' : ' ',
action->name);
for (action=action->next; action; action = action->next) {
for (action = action->next; action; action = action->next) {
seq_printf(p, ",%s %s",
(action->flags & IRQF_DISABLED) ? " +" : "",
action->name);
......@@ -209,20 +174,18 @@ int show_interrupts(struct seq_file *p, void *v)
void free_irq(unsigned int irq, void *dev_id)
{
struct irqaction * action;
struct irqaction *action;
struct irqaction **actionp;
unsigned long flags;
unsigned int cpu_irq;
if (sparc_cpu_model == sun4d) {
extern void sun4d_free_irq(unsigned int, void *);
sun4d_free_irq(irq, dev_id);
return;
}
cpu_irq = irq & (NR_IRQS - 1);
if (cpu_irq > 14) { /* 14 irq levels on the sparc */
printk("Trying to free bogus IRQ %d\n", irq);
printk(KERN_ERR "Trying to free bogus IRQ %d\n", irq);
return;
}
......@@ -232,7 +195,7 @@ void free_irq(unsigned int irq, void *dev_id)
action = *actionp;
if (!action->handler) {
printk("Trying to free free IRQ%d\n",irq);
printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
goto out_unlock;
}
if (dev_id) {
......@@ -242,19 +205,21 @@ void free_irq(unsigned int irq, void *dev_id)
actionp = &action->next;
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
printk(KERN_ERR "Trying to free free shared IRQ%d\n",
irq);
goto out_unlock;
}
} else if (action->flags & IRQF_SHARED) {
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
irq);
goto out_unlock;
}
if (action->flags & SA_STATIC_ALLOC)
{
/* This interrupt is marked as specially allocated
if (action->flags & SA_STATIC_ALLOC) {
/*
* This interrupt is marked as specially allocated
* so it is a bad idea to free it.
*/
printk("Attempt to free statically allocated IRQ%d (%s)\n",
printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
goto out_unlock;
}
......@@ -275,7 +240,6 @@ void free_irq(unsigned int irq, void *dev_id)
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
EXPORT_SYMBOL(free_irq);
/*
......@@ -297,64 +261,62 @@ void synchronize_irq(unsigned int irq)
EXPORT_SYMBOL(synchronize_irq);
#endif /* SMP */
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
{
int i;
struct irqaction * action;
struct irqaction *action;
unsigned int cpu_irq;
cpu_irq = irq & (NR_IRQS - 1);
action = sparc_irq[cpu_irq].action;
printk("IO device interrupt, irq = %d\n", irq);
printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
printk(KERN_ERR "IO device interrupt, irq = %d\n", irq);
printk(KERN_ERR "PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
regs->npc, regs->u_regs[14]);
if (action) {
printk("Expecting: ");
printk(KERN_ERR "Expecting: ");
for (i = 0; i < 16; i++)
if (action->handler)
printk("[%s:%d:0x%x] ", action->name,
(int) i, (unsigned int) action->handler);
printk(KERN_CONT "[%s:%d:0x%x] ", action->name,
i, (unsigned int)action->handler);
}
printk("AIEEE\n");
printk(KERN_ERR "AIEEE\n");
panic("bogus interrupt received");
}
void handler_irq(int irq, struct pt_regs * regs)
void handler_irq(int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
struct irqaction * action;
struct irqaction *action;
int cpu = smp_processor_id();
#ifdef CONFIG_SMP
extern void smp4m_irq_rotate(int cpu);
#endif
old_regs = set_irq_regs(regs);
irq_enter();
disable_pil_irq(irq);
disable_pil_irq(pil);
#ifdef CONFIG_SMP
/* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
if((sparc_cpu_model==sun4m) && (irq < 10))
if ((sparc_cpu_model==sun4m) && (pil < 10))
smp4m_irq_rotate(cpu);
#endif
action = sparc_irq[irq].action;
sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
kstat_cpu(cpu).irqs[irq]++;
action = sparc_irq[pil].action;
sparc_irq[pil].flags |= SPARC_IRQ_INPROGRESS;
kstat_cpu(cpu).irqs[pil]++;
do {
if (!action || !action->handler)
unexpected_irq(irq, NULL, regs);
action->handler(irq, action->dev_id);
unexpected_irq(pil, NULL, regs);
action->handler(pil, action->dev_id);
action = action->next;
} while (action);
sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
enable_pil_irq(irq);
sparc_irq[pil].flags &= ~SPARC_IRQ_INPROGRESS;
enable_pil_irq(pil);
irq_exit();
set_irq_regs(old_regs);
}
#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
/* Fast IRQs on the Sparc can only have one routine attached to them,
/*
* Fast IRQs on the Sparc can only have one routine attached to them,
* thus no sharing possible.
*/
static int request_fast_irq(unsigned int irq,
......@@ -367,15 +329,13 @@ static int request_fast_irq(unsigned int irq,
int ret;
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
struct tt_entry *trap_table;
extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
cpu_irq = irq & (NR_IRQS - 1);
if(cpu_irq > 14) {
if (cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
if(!handler) {
if (!handler) {
ret = -EINVAL;
goto out;
}
......@@ -383,33 +343,32 @@ static int request_fast_irq(unsigned int irq,
spin_lock_irqsave(&irq_action_lock, flags);
action = sparc_irq[cpu_irq].action;
if(action) {
if(action->flags & IRQF_SHARED)
if (action) {
if (action->flags & IRQF_SHARED)
panic("Trying to register fast irq when already shared.\n");
if(irqflags & IRQF_SHARED)
if (irqflags & IRQF_SHARED)
panic("Trying to register fast irq as shared.\n");
/* Anyway, someone already owns it so cannot be made fast. */
printk("request_fast_irq: Trying to register yet already owned.\n");
printk(KERN_ERR "request_fast_irq: Trying to register yet already owned.\n");
ret = -EBUSY;
goto out_unlock;
}
/* If this is flagged as statically allocated then we use our
/*
* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
if (irqflags & SA_STATIC_ALLOC) {
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
printk(KERN_ERR "Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
irq, devname);
}
if (action == NULL)
action = kmalloc(sizeof(struct irqaction),
GFP_ATOMIC);
action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
if (!action) {
ret = -ENOMEM;
goto out_unlock;
......@@ -426,9 +385,12 @@ static int request_fast_irq(unsigned int irq,
INSTANTIATE(sparc_ttable)
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu1;
INSTANTIATE(trap_table)
trap_table = &trapbase_cpu2;
INSTANTIATE(trap_table)
trap_table = &trapbase_cpu3;
INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
/*
......@@ -454,7 +416,8 @@ static int request_fast_irq(unsigned int irq,
return ret;
}
/* These variables are used to access state from the assembler
/*
* These variables are used to access state from the assembler
* interrupt handler, floppy_hardint, so we cannot put these in
* the floppy driver image because that would not work in the
* modular case.
......@@ -477,8 +440,6 @@ EXPORT_SYMBOL(pdma_base);
unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);
extern void floppy_hardint(void);
static irq_handler_t floppy_irq_handler;
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
......@@ -494,9 +455,11 @@ void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
irq_exit();
enable_pil_irq(irq);
set_irq_regs(old_regs);
// XXX Eek, it's totally changed with preempt_count() and such
// if (softirq_pending(cpu))
// do_softirq();
/*
* XXX Eek, it's totally changed with preempt_count() and such
* if (softirq_pending(cpu))
* do_softirq();
*/
}
int sparc_floppy_request_irq(int irq, unsigned long flags,
......@@ -511,21 +474,18 @@ EXPORT_SYMBOL(sparc_floppy_request_irq);
int request_irq(unsigned int irq,
irq_handler_t handler,
unsigned long irqflags, const char * devname, void *dev_id)
unsigned long irqflags, const char *devname, void *dev_id)
{
struct irqaction * action, **actionp;
struct irqaction *action, **actionp;
unsigned long flags;
unsigned int cpu_irq;
int ret;
if (sparc_cpu_model == sun4d) {
extern int sun4d_request_irq(unsigned int,
irq_handler_t ,
unsigned long, const char *, void *);
if (sparc_cpu_model == sun4d)
return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
}
cpu_irq = irq & (NR_IRQS - 1);
if(cpu_irq > 14) {
if (cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
......@@ -544,7 +504,8 @@ int request_irq(unsigned int irq,
goto out_unlock;
}
if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
irq);
ret = -EBUSY;
goto out_unlock;
}
......@@ -559,13 +520,11 @@ int request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
irq, devname);
}
if (action == NULL)
action = kmalloc(sizeof(struct irqaction),
GFP_ATOMIC);
action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
if (!action) {
ret = -ENOMEM;
goto out_unlock;
......@@ -587,7 +546,6 @@ int request_irq(unsigned int irq,
out:
return ret;
}
EXPORT_SYMBOL(request_irq);
void disable_irq_nosync(unsigned int irq)
......@@ -606,26 +564,30 @@ void enable_irq(unsigned int irq)
{
__enable_irq(irq);
}
EXPORT_SYMBOL(enable_irq);
/* We really don't need these at all on the Sparc. We only have
/*
* We really don't need these at all on the Sparc. We only have
* stubs here because they are exported to modules.
*/
unsigned long probe_irq_on(void)
{
return 0;
}
EXPORT_SYMBOL(probe_irq_on);
int probe_irq_off(unsigned long mask)
{
return 0;
}
EXPORT_SYMBOL(probe_irq_off);
static unsigned int build_device_irq(struct platform_device *op,
unsigned int real_irq)
{
return real_irq;
}
/* djhr
* This could probably be made indirect too and assigned in the CPU
* bits of the code. That would be much nicer I think and would also
......@@ -636,11 +598,9 @@ EXPORT_SYMBOL(probe_irq_off);
void __init init_IRQ(void)
{
extern void sun4c_init_IRQ( void );
extern void sun4m_init_IRQ( void );
extern void sun4d_init_IRQ( void );
sparc_irq_config.build_device_irq = build_device_irq;
switch(sparc_cpu_model) {
switch (sparc_cpu_model) {
case sun4c:
case sun4:
sun4c_init_IRQ();
......
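The irq to pil rename in handler_irq() above makes explicit that on sparc32 the trap entry hands over a processor interrupt level and that sparc_irq[] is indexed by that level. A stripped-down sketch of the dispatch, not part of the diff (the function name is hypothetical):

static void example_dispatch_pil(int pil)
{
	struct irqaction *action = sparc_irq[pil].action;

	disable_pil_irq(pil);
	for (; action; action = action->next)
		action->handler(pil, action->dev_id);	/* run every handler on this level */
	enable_pil_irq(pil);
}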
......@@ -82,7 +82,7 @@ static void bucket_clear_chain_pa(unsigned long bucket_pa)
"i" (ASI_PHYS_USE_EC));
}
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
unsigned int ret;
......@@ -90,21 +90,20 @@ static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
__virt_irq)),
__irq)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
static void bucket_set_virt_irq(unsigned long bucket_pa,
unsigned int virt_irq)
static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
__asm__ __volatile__("stwa %0, [%1] %2"
: /* no outputs */
: "r" (virt_irq),
: "r" (irq),
"r" (bucket_pa +
offsetof(struct ino_bucket,
__virt_irq)),
__irq)),
"i" (ASI_PHYS_USE_EC));
}
......@@ -114,50 +113,49 @@ static struct {
unsigned int dev_handle;
unsigned int dev_ino;
unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);
unsigned char virt_irq_alloc(unsigned int dev_handle,
unsigned int dev_ino)
unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
unsigned long flags;
unsigned char ent;
BUILD_BUG_ON(NR_IRQS >= 256);
spin_lock_irqsave(&virt_irq_alloc_lock, flags);
spin_lock_irqsave(&irq_alloc_lock, flags);
for (ent = 1; ent < NR_IRQS; ent++) {
if (!virt_irq_table[ent].in_use)
if (!irq_table[ent].in_use)
break;
}
if (ent >= NR_IRQS) {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
ent = 0;
} else {
virt_irq_table[ent].dev_handle = dev_handle;
virt_irq_table[ent].dev_ino = dev_ino;
virt_irq_table[ent].in_use = 1;
irq_table[ent].dev_handle = dev_handle;
irq_table[ent].dev_ino = dev_ino;
irq_table[ent].in_use = 1;
}
spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
spin_unlock_irqrestore(&irq_alloc_lock, flags);
return ent;
}
#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
void irq_free(unsigned int irq)
{
unsigned long flags;
if (virt_irq >= NR_IRQS)
if (irq >= NR_IRQS)
return;
spin_lock_irqsave(&virt_irq_alloc_lock, flags);
spin_lock_irqsave(&irq_alloc_lock, flags);
virt_irq_table[virt_irq].in_use = 0;
irq_table[irq].in_use = 0;
spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif
......@@ -190,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %9s", irq_desc[i].chip->name);
seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
......@@ -253,39 +251,38 @@ struct irq_handler_data {
};
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
cpumask_t mask;
int cpuid;
cpumask_copy(&mask, affinity);
if (cpus_equal(mask, cpu_online_map)) {
cpuid = map_to_cpu(virt_irq);
cpuid = map_to_cpu(irq);
} else {
cpumask_t tmp;
cpus_and(tmp, cpu_online_map, mask);
cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
}
return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity) \
#define irq_choose_cpu(irq, affinity) \
real_hard_smp_processor_id()
#endif
static void sun4u_irq_enable(unsigned int virt_irq)
static void sun4u_irq_enable(struct irq_data *data)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
struct irq_handler_data *handler_data = data->handler_data;
if (likely(data)) {
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
cpuid = irq_choose_cpu(virt_irq,
irq_desc[virt_irq].affinity);
imap = data->imap;
cpuid = irq_choose_cpu(data->irq, data->affinity);
imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
......@@ -294,21 +291,21 @@ static void sun4u_irq_enable(unsigned int virt_irq)
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
upa_writeq(ICLR_IDLE, data->iclr);
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
}
static int sun4u_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
static int sun4u_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
struct irq_handler_data *handler_data = data->handler_data;
if (likely(data)) {
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
cpuid = irq_choose_cpu(virt_irq, mask);
imap = data->imap;
cpuid = irq_choose_cpu(data->irq, mask);
imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
......@@ -317,7 +314,7 @@ static int sun4u_set_affinity(unsigned int virt_irq,
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
upa_writeq(ICLR_IDLE, data->iclr);
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
return 0;
......@@ -340,27 +337,26 @@ static int sun4u_set_affinity(unsigned int virt_irq,
* sees that, it also hooks up a default ->shutdown method which
* invokes ->mask() which we do not want. See irq_chip_set_defaults().
*/
static void sun4u_irq_disable(unsigned int virt_irq)
static void sun4u_irq_disable(struct irq_data *data)
{
}
static void sun4u_irq_eoi(unsigned int virt_irq)
static void sun4u_irq_eoi(struct irq_data *data)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
struct irq_desc *desc = irq_desc + virt_irq;
struct irq_handler_data *handler_data = data->handler_data;
struct irq_desc *desc = irq_desc + data->irq;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
if (likely(data))
upa_writeq(ICLR_IDLE, data->iclr);
if (likely(handler_data))
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
static void sun4v_irq_enable(unsigned int virt_irq)
static void sun4v_irq_enable(struct irq_data *data)
{
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(virt_irq,
irq_desc[virt_irq].affinity);
unsigned int ino = irq_table[data->irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
int err;
err = sun4v_intr_settarget(ino, cpuid);
......@@ -377,11 +373,11 @@ static void sun4v_irq_enable(unsigned int virt_irq)
ino, err);
}
static int sun4v_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
static int sun4v_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
unsigned int ino = irq_table[data->irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(data->irq, mask);
int err;
err = sun4v_intr_settarget(ino, cpuid);
......@@ -392,9 +388,9 @@ static int sun4v_set_affinity(unsigned int virt_irq,
return 0;
}
static void sun4v_irq_disable(unsigned int virt_irq)
static void sun4v_irq_disable(struct irq_data *data)
{
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
unsigned int ino = irq_table[data->irq].dev_ino;
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
......@@ -403,10 +399,10 @@ static void sun4v_irq_disable(unsigned int virt_irq)
"err(%d)\n", ino, err);
}
static void sun4v_irq_eoi(unsigned int virt_irq)
static void sun4v_irq_eoi(struct irq_data *data)
{
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
struct irq_desc *desc = irq_desc + virt_irq;
unsigned int ino = irq_table[data->irq].dev_ino;
struct irq_desc *desc = irq_desc + data->irq;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
......@@ -418,15 +414,15 @@ static void sun4v_irq_eoi(unsigned int virt_irq)
"err(%d)\n", ino, err);
}
static void sun4v_virq_enable(unsigned int virt_irq)
static void sun4v_virq_enable(struct irq_data *data)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
cpuid = irq_choose_cpu(data->irq, data->affinity);
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
dev_handle = irq_table[data->irq].dev_handle;
dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
......@@ -447,16 +443,16 @@ static void sun4v_virq_enable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
static int sun4v_virt_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
static int sun4v_virt_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
cpuid = irq_choose_cpu(virt_irq, mask);
cpuid = irq_choose_cpu(data->irq, mask);
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
dev_handle = irq_table[data->irq].dev_handle;
dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
......@@ -467,13 +463,13 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
return 0;
}
static void sun4v_virq_disable(unsigned int virt_irq)
static void sun4v_virq_disable(struct irq_data *data)
{
unsigned long dev_handle, dev_ino;
int err;
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
dev_handle = irq_table[data->irq].dev_handle;
dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
......@@ -483,17 +479,17 @@ static void sun4v_virq_disable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
static void sun4v_virq_eoi(unsigned int virt_irq)
static void sun4v_virq_eoi(struct irq_data *data)
{
struct irq_desc *desc = irq_desc + virt_irq;
struct irq_desc *desc = irq_desc + data->irq;
unsigned long dev_handle, dev_ino;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
dev_handle = virt_irq_table[virt_irq].dev_handle;
dev_ino = virt_irq_table[virt_irq].dev_ino;
dev_handle = irq_table[data->irq].dev_handle;
dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
......@@ -505,49 +501,48 @@ static void sun4v_virq_eoi(unsigned int virt_irq)
static struct irq_chip sun4u_irq = {
.name = "sun4u",
.enable = sun4u_irq_enable,
.disable = sun4u_irq_disable,
.eoi = sun4u_irq_eoi,
.set_affinity = sun4u_set_affinity,
.irq_enable = sun4u_irq_enable,
.irq_disable = sun4u_irq_disable,
.irq_eoi = sun4u_irq_eoi,
.irq_set_affinity = sun4u_set_affinity,
};
static struct irq_chip sun4v_irq = {
.name = "sun4v",
.enable = sun4v_irq_enable,
.disable = sun4v_irq_disable,
.eoi = sun4v_irq_eoi,
.set_affinity = sun4v_set_affinity,
.irq_enable = sun4v_irq_enable,
.irq_disable = sun4v_irq_disable,
.irq_eoi = sun4v_irq_eoi,
.irq_set_affinity = sun4v_set_affinity,
};
static struct irq_chip sun4v_virq = {
.name = "vsun4v",
.enable = sun4v_virq_enable,
.disable = sun4v_virq_disable,
.eoi = sun4v_virq_eoi,
.set_affinity = sun4v_virt_set_affinity,
.irq_enable = sun4v_virq_enable,
.irq_disable = sun4v_virq_disable,
.irq_eoi = sun4v_virq_eoi,
.irq_set_affinity = sun4v_virt_set_affinity,
};
static void pre_flow_handler(unsigned int virt_irq,
struct irq_desc *desc)
static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
struct irq_handler_data *handler_data = get_irq_data(irq);
unsigned int ino = irq_table[irq].dev_ino;
data->pre_handler(ino, data->arg1, data->arg2);
handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
handle_fasteoi_irq(virt_irq, desc);
handle_fasteoi_irq(irq, desc);
}
void irq_install_pre_handler(int virt_irq,
void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
struct irq_desc *desc = irq_desc + virt_irq;
struct irq_handler_data *handler_data = get_irq_data(irq);
struct irq_desc *desc = irq_desc + irq;
data->pre_handler = func;
data->arg1 = arg1;
data->arg2 = arg2;
handler_data->pre_handler = func;
handler_data->arg1 = arg1;
handler_data->arg2 = arg2;
desc->handle_irq = pre_flow_handler;
}
......@@ -555,81 +550,81 @@ void irq_install_pre_handler(int virt_irq,
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
struct ino_bucket *bucket;
struct irq_handler_data *data;
unsigned int virt_irq;
struct irq_handler_data *handler_data;
unsigned int irq;
int ino;
BUG_ON(tlb_type == hypervisor);
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
virt_irq = bucket_get_virt_irq(__pa(bucket));
if (!virt_irq) {
virt_irq = virt_irq_alloc(0, ino);
bucket_set_virt_irq(__pa(bucket), virt_irq);
set_irq_chip_and_handler_name(virt_irq,
irq = bucket_get_irq(__pa(bucket));
if (!irq) {
irq = irq_alloc(0, ino);
bucket_set_irq(__pa(bucket), irq);
set_irq_chip_and_handler_name(irq,
&sun4u_irq,
handle_fasteoi_irq,
"IVEC");
}
data = get_irq_chip_data(virt_irq);
if (unlikely(data))
handler_data = get_irq_data(irq);
if (unlikely(handler_data))
goto out;
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data)) {
handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
set_irq_chip_data(virt_irq, data);
set_irq_data(irq, handler_data);
data->imap = imap;
data->iclr = iclr;
handler_data->imap = imap;
handler_data->iclr = iclr;
out:
return virt_irq;
return irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
struct irq_chip *chip)
{
struct ino_bucket *bucket;
struct irq_handler_data *data;
unsigned int virt_irq;
struct irq_handler_data *handler_data;
unsigned int irq;
BUG_ON(tlb_type != hypervisor);
bucket = &ivector_table[sysino];
virt_irq = bucket_get_virt_irq(__pa(bucket));
if (!virt_irq) {
virt_irq = virt_irq_alloc(0, sysino);
bucket_set_virt_irq(__pa(bucket), virt_irq);
set_irq_chip_and_handler_name(virt_irq, chip,
irq = bucket_get_irq(__pa(bucket));
if (!irq) {
irq = irq_alloc(0, sysino);
bucket_set_irq(__pa(bucket), irq);
set_irq_chip_and_handler_name(irq, chip,
handle_fasteoi_irq,
"IVEC");
}
data = get_irq_chip_data(virt_irq);
if (unlikely(data))
handler_data = get_irq_data(irq);
if (unlikely(handler_data))
goto out;
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data)) {
handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
set_irq_chip_data(virt_irq, data);
set_irq_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
data->imap = ~0UL;
data->iclr = ~0UL;
handler_data->imap = ~0UL;
handler_data->iclr = ~0UL;
out:
return virt_irq;
return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
......@@ -641,11 +636,11 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
struct irq_handler_data *data;
struct irq_handler_data *handler_data;
unsigned long hv_err, cookie;
struct ino_bucket *bucket;
struct irq_desc *desc;
unsigned int virt_irq;
unsigned int irq;
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
if (unlikely(!bucket))
......@@ -662,32 +657,32 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
((unsigned long) bucket +
sizeof(struct ino_bucket)));
virt_irq = virt_irq_alloc(devhandle, devino);
bucket_set_virt_irq(__pa(bucket), virt_irq);
irq = irq_alloc(devhandle, devino);
bucket_set_irq(__pa(bucket), irq);
set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
set_irq_chip_and_handler_name(irq, &sun4v_virq,
handle_fasteoi_irq,
"IVEC");
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data))
handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!handler_data))
return 0;
/* In order to make the LDC channel startup sequence easier,
* especially wrt. locking, we do not let request_irq() enable
* the interrupt.
*/
desc = irq_desc + virt_irq;
desc = irq_desc + irq;
desc->status |= IRQ_NOAUTOEN;
set_irq_chip_data(virt_irq, data);
set_irq_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
data->imap = ~0UL;
data->iclr = ~0UL;
handler_data->imap = ~0UL;
handler_data->iclr = ~0UL;
cookie = ~__pa(bucket);
hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
......@@ -697,30 +692,30 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
prom_halt();
}
return virt_irq;
return irq;
}
void ack_bad_irq(unsigned int virt_irq)
void ack_bad_irq(unsigned int irq)
{
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
unsigned int ino = irq_table[irq].dev_ino;
if (!ino)
ino = 0xdeadbeef;
printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
ino, virt_irq);
printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
ino, irq);
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
void __irq_entry handler_irq(int irq, struct pt_regs *regs)
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
void *orig_sp;
clear_softint(1 << irq);
clear_softint(1 << pil);
old_regs = set_irq_regs(regs);
irq_enter();
......@@ -741,16 +736,16 @@ void __irq_entry handler_irq(int irq, struct pt_regs *regs)
while (bucket_pa) {
struct irq_desc *desc;
unsigned long next_pa;
unsigned int virt_irq;
unsigned int irq;
next_pa = bucket_get_chain_pa(bucket_pa);
virt_irq = bucket_get_virt_irq(bucket_pa);
irq = bucket_get_irq(bucket_pa);
bucket_clear_chain_pa(bucket_pa);
desc = irq_desc + virt_irq;
desc = irq_desc + irq;
if (!(desc->status & IRQ_DISABLED))
desc->handle_irq(virt_irq, desc);
desc->handle_irq(irq, desc);
bucket_pa = next_pa;
}
......@@ -798,9 +793,12 @@ void fixup_irqs(void)
raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
irq_desc[irq].affinity);
struct irq_data *data = irq_get_irq_data(irq);
if (data->chip->irq_set_affinity)
data->chip->irq_set_affinity(data,
data->affinity,
false);
}
raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
......
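The sparc64 hunks above are the GENERIC_HARDIRQS_NO_DEPRECATED conversion: irq_chip callbacks now take a struct irq_data instead of a bare irq number, and per-interrupt state is reached through that structure (set_irq_data()/data->handler_data) rather than get_irq_chip_data(). A condensed sketch of the new shape, not part of the diff (the example_* names are hypothetical):

static void example_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	/* data->irq is the Linux interrupt number, data->affinity its
	 * cpumask; the old-style callbacks had to look both up by hand.
	 */
	if (handler_data)
		/* program IMAP/ICLR or issue the hypervisor call here */ ;
}

static struct irq_chip example_chip = {
	.name		= "example",
	.irq_enable	= example_irq_enable,	/* was .enable(unsigned int) */
};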
......@@ -3,6 +3,8 @@
#include <linux/interrupt.h>
#include <asm/traps.h>
/* cpu.c */
extern const char *sparc_cpu_type;
extern const char *sparc_pmu_type;
......@@ -26,6 +28,53 @@ extern int static_irq_count;
extern spinlock_t irq_action_lock;
extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
extern void init_IRQ(void);
/* sun4c_irq.c */
extern void sun4c_init_IRQ(void);
/* sun4m_irq.c */
extern unsigned int lvl14_resolution;
extern void sun4m_init_IRQ(void);
extern void sun4m_clear_profile_irq(int cpu);
/* sun4d_irq.c */
extern spinlock_t sun4d_imsk_lock;
extern void sun4d_init_IRQ(void);
extern int sun4d_request_irq(unsigned int irq,
irq_handler_t handler,
unsigned long irqflags,
const char *devname, void *dev_id);
extern int show_sun4d_interrupts(struct seq_file *, void *);
extern void sun4d_distribute_irqs(void);
extern void sun4d_free_irq(unsigned int irq, void *dev_id);
/* head_32.S */
extern unsigned int t_nmi[];
extern unsigned int linux_trap_ipi15_sun4d[];
extern unsigned int linux_trap_ipi15_sun4m[];
extern struct tt_entry trapbase_cpu1;
extern struct tt_entry trapbase_cpu2;
extern struct tt_entry trapbase_cpu3;
extern char cputypval[];
/* entry.S */
extern unsigned long lvl14_save[4];
extern unsigned int real_irq_entry[];
extern unsigned int smp4d_ticker[];
extern unsigned int patchme_maybe_smp_msg[];
extern void floppy_hardint(void);
/* trampoline_32.S */
extern int __smp4m_processor_id(void);
extern int __smp4d_processor_id(void);
extern unsigned long sun4m_cpu_startup;
extern unsigned long sun4d_cpu_startup;
#else /* CONFIG_SPARC32 */
#endif /* CONFIG_SPARC32 */
......
......@@ -790,13 +790,17 @@ static void send_events(struct ldc_channel *lp, unsigned int event_mask)
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long orig_state, hv_err, flags;
unsigned long orig_state, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
hv_err = sun4v_ldc_rx_get_state(lp->id,
/* We should probably check for hypervisor errors here and
* reset the LDC channel if we get one.
*/
sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
......@@ -904,13 +908,17 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long flags, hv_err, orig_state;
unsigned long flags, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
hv_err = sun4v_ldc_tx_get_state(lp->id,
/* We should probably check for hypervisor errors here and
* reset the LDC channel if we get one.
*/
sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
......
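The ldc.c hunks drop the unused hv_err locals and add a comment that hypervisor errors should probably be checked and the channel reset. A hedged sketch of what such a check inside ldc_rx() might look like, explicitly not part of the diff (the reset step is left as a placeholder because no reset helper is introduced here):

unsigned long hv_err;

hv_err = sun4v_ldc_rx_get_state(lp->id, &lp->rx_head, &lp->rx_tail,
				&lp->chan_state);
if (hv_err != HV_EOK) {
	printk(KERN_ERR "ldc: rx_get_state() failed, err=%lu\n", hv_err);
	/* reset or tear down the LDC channel here, as the comment suggests */
}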
......@@ -30,6 +30,7 @@ struct amba_apb_device leon_percpu_timer_dev[16];
int leondebug_irq_disable;
int leon_debug_irqout;
static int dummy_master_l10_counter;
unsigned long amba_system_id;
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
......@@ -117,10 +118,16 @@ void __init leon_init_timers(irq_handler_t counter_fn)
master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
dummy_master_l10_counter = 0;
/*Find IRQMP IRQ Controller Registers base address otherwise bail out.*/
rootnp = of_find_node_by_path("/ambapp0");
if (!rootnp)
goto bad;
/* Find System ID: GRLIB build ID and optional CHIP ID */
pp = of_find_property(rootnp, "systemid", &len);
if (pp)
amba_system_id = *(unsigned long *)pp->value;
/* Find IRQMP IRQ Controller Registers base adr otherwise bail out */
np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
if (!np) {
np = of_find_node_by_name(rootnp, "01_00d");
......@@ -340,7 +347,7 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
void __init leon_init_IRQ(void)
{
sparc_init_timers = leon_init_timers;
sparc_irq_config.init_timers = leon_init_timers;
BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
......
/* leon_pmc.c: LEON Power-down cpu_idle() handler
*
* Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <asm/leon_amba.h>
#include <asm/leon.h>
/* List of Systems that need fixup instructions around power-down instruction */
unsigned int pmc_leon_fixup_ids[] = {
AEROFLEX_UT699,
GAISLER_GR712RC,
LEON4_NEXTREME1,
0
};
int pmc_leon_need_fixup(void)
{
unsigned int systemid = amba_system_id >> 16;
unsigned int *id;
id = &pmc_leon_fixup_ids[0];
while (*id != 0) {
if (*id == systemid)
return 1;
id++;
}
return 0;
}
/*
* CPU idle callback function for systems that need some extra handling
* See .../arch/sparc/kernel/process.c
*/
void pmc_leon_idle_fixup(void)
{
/* Prepare an address to a non-cachable region. APB is always
 * non-cachable. One instruction is executed after the Sleep
 * instruction, so we make sure to read the bus and throw away the
 * value by accessing a non-cachable area; we also make sure the
 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
*/
register unsigned int address = (unsigned int)leon3_irqctrl_regs;
__asm__ __volatile__ (
"mov %%g0, %%asr19\n"
"lda [%0] %1, %%g0\n"
:
: "r"(address), "i"(ASI_LEON_BYPASS));
}
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
void pmc_leon_idle(void)
{
/* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("mov %g0, %asr19\n\t");
}
/* Install LEON Power Down function */
static int __init leon_pmc_install(void)
{
/* Assign power management IDLE handler */
if (pmc_leon_need_fixup())
pm_idle = pmc_leon_idle_fixup;
else
pm_idle = pmc_leon_idle;
printk(KERN_INFO "leon: power management initialized\n");
return 0;
}
/* This driver is not critical to the boot process, don't care
* if initialized late.
*/
late_initcall(leon_pmc_install);
......@@ -41,6 +41,8 @@
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "kernel.h"
#ifdef CONFIG_SPARC_LEON
#include "irq.h"
......@@ -261,23 +263,23 @@ void __init leon_smp_done(void)
/* Free unneeded trap tables */
if (!cpu_isset(1, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu1));
init_page_count(virt_to_page(trapbase_cpu1));
free_page((unsigned long)trapbase_cpu1);
ClearPageReserved(virt_to_page(&trapbase_cpu1));
init_page_count(virt_to_page(&trapbase_cpu1));
free_page((unsigned long)&trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(2, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu2));
init_page_count(virt_to_page(trapbase_cpu2));
free_page((unsigned long)trapbase_cpu2);
ClearPageReserved(virt_to_page(&trapbase_cpu2));
init_page_count(virt_to_page(&trapbase_cpu2));
free_page((unsigned long)&trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(3, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu3));
init_page_count(virt_to_page(trapbase_cpu3));
free_page((unsigned long)trapbase_cpu3);
ClearPageReserved(virt_to_page(&trapbase_cpu3));
init_page_count(virt_to_page(&trapbase_cpu3));
free_page((unsigned long)&trapbase_cpu3);
totalram_pages++;
num_physpages++;
}
......@@ -437,15 +439,6 @@ void __init leon_blackbox_current(unsigned *addr)
}
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
void pmc_leon_idle(void)
{
__asm__ volatile ("mov %g0, %asr19");
}
void __init leon_init_smp(void)
{
/* Patch ipi15 trap table */
......@@ -456,13 +449,6 @@ void __init leon_init_smp(void)
BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
BTFIXUPCALL_NORM);
#ifndef PMC_NO_IDLE
/* Assign power management IDLE handler */
pm_idle = pmc_leon_idle;
printk(KERN_INFO "leon: power management initialized\n");
#endif
}
#endif /* CONFIG_SPARC_LEON */
......@@ -13,6 +13,7 @@
#include <asm/leon_amba.h>
#include "of_device_common.h"
#include "irq.h"
/*
* PCI bus specific translator
......@@ -355,7 +356,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (intr) {
op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
for (i = 0; i < op->archdata.num_irqs; i++)
op->archdata.irqs[i] = intr[i].pri;
op->archdata.irqs[i] =
sparc_irq_config.build_device_irq(op, intr[i].pri);
} else {
const unsigned int *irq =
of_get_property(dp, "interrupts", &len);
......@@ -363,64 +365,13 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (irq) {
op->archdata.num_irqs = len / sizeof(unsigned int);
for (i = 0; i < op->archdata.num_irqs; i++)
op->archdata.irqs[i] = irq[i];
op->archdata.irqs[i] =
sparc_irq_config.build_device_irq(op, irq[i]);
} else {
op->archdata.num_irqs = 0;
}
}
if (sparc_cpu_model == sun4d) {
static int pil_to_sbus[] = {
0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};
struct device_node *io_unit, *sbi = dp->parent;
const struct linux_prom_registers *regs;
int board, slot;
while (sbi) {
if (!strcmp(sbi->name, "sbi"))
break;
sbi = sbi->parent;
}
if (!sbi)
goto build_resources;
regs = of_get_property(dp, "reg", NULL);
if (!regs)
goto build_resources;
slot = regs->which_io;
/* If SBI's parent is not io-unit or the io-unit lacks
* a "board#" property, something is very wrong.
*/
if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
printk("%s: Error, parent is not io-unit.\n",
sbi->full_name);
goto build_resources;
}
io_unit = sbi->parent;
board = of_getintprop_default(io_unit, "board#", -1);
if (board == -1) {
printk("%s: Error, lacks board# property.\n",
io_unit->full_name);
goto build_resources;
}
for (i = 0; i < op->archdata.num_irqs; i++) {
int this_irq = op->archdata.irqs[i];
int sbusl = pil_to_sbus[this_irq];
if (sbusl)
this_irq = (((board + 1) << 5) +
(sbusl << 2) +
slot);
op->archdata.irqs[i] = this_irq;
}
}
build_resources:
build_device_resources(op, parent);
op->dev.parent = parent;
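The sparc_irq_config.build_device_irq hook used above is the per-platform translation point; a hedged sketch of the simplest possible hook, a pass-through for platforms needing no PROM-to-Linux IRQ translation (shown for illustration, not taken from the kernel), would be:

/* Illustrative pass-through hook (not the kernel's actual default);
 * needs <linux/platform_device.h> for struct platform_device. */
static unsigned int passthrough_build_device_irq(struct platform_device *op,
						 unsigned int real_irq)
{
	return real_irq;
}

/* Registered once at IRQ-init time, for example:
 *	sparc_irq_config.build_device_irq = passthrough_build_device_irq;
 */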
......
......@@ -675,6 +675,7 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
* humanoid.
*/
err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
(void) err;
}
list_for_each_entry(child_bus, &bus->children, node)
pci_bus_register_of_sysfs(child_bus);
......@@ -1001,22 +1002,22 @@ EXPORT_SYMBOL(pci_domain_nr);
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned int virt_irq;
unsigned int irq;
if (!pbm->setup_msi_irq)
return -EINVAL;
return pbm->setup_msi_irq(&virt_irq, pdev, desc);
return pbm->setup_msi_irq(&irq, pdev, desc);
}
void arch_teardown_msi_irq(unsigned int virt_irq)
void arch_teardown_msi_irq(unsigned int irq)
{
struct msi_desc *entry = get_irq_msi(virt_irq);
struct msi_desc *entry = get_irq_msi(irq);
struct pci_dev *pdev = entry->dev;
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
pbm->teardown_msi_irq(virt_irq, pdev);
pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */
......
......@@ -295,12 +295,15 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int bus = bus_dev->number;
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);
unsigned long ret;
if (config_out_of_range(pbm, bus, devfn, where)) {
/* Do nothing. */
} else {
ret = pci_sun4v_config_put(devhandle,
/* We don't check for hypervisor errors here, but perhaps
* we should and influence our return value depending upon
* what kind of error is thrown.
*/
pci_sun4v_config_put(devhandle,
HV_PCI_DEVICE_BUILD(bus, device, func),
where, size, value);
}
......
......@@ -214,11 +214,9 @@ static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
unsigned long msiqid;
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
msiqid = (val & MSI_MAP_EQNUM);
val &= ~MSI_MAP_VALID;
......@@ -277,7 +275,7 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
unsigned int virt_irq;
unsigned int irq;
int fixup;
u64 val;
......@@ -293,14 +291,14 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
virt_irq = build_irq(fixup, iclr_reg, imap_reg);
if (!virt_irq)
irq = build_irq(fixup, iclr_reg, imap_reg);
if (!irq)
return -ENOMEM;
upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
return virt_irq;
return irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
......
......@@ -131,9 +131,9 @@ struct pci_pbm_info {
void *msi_queues;
unsigned long *msi_bitmap;
unsigned int *msi_irq_table;
int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
int (*setup_msi_irq)(unsigned int *irq_p, struct pci_dev *pdev,
struct msi_desc *entry);
void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
void (*teardown_msi_irq)(unsigned int irq, struct pci_dev *pdev);
const struct sparc64_msiq_ops *msi_ops;
#endif /* !(CONFIG_PCI_MSI) */
......
......@@ -31,12 +31,12 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0)) {
struct irq_desc *desc;
unsigned int virt_irq;
unsigned int irq;
virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
desc = irq_desc + virt_irq;
irq = pbm->msi_irq_table[msi - pbm->msi_first];
desc = irq_desc + irq;
desc->handle_irq(virt_irq, desc);
desc->handle_irq(irq, desc);
}
if (unlikely(err < 0))
......@@ -121,7 +121,7 @@ static struct irq_chip msi_irq = {
/* XXX affinity XXX */
};
static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
static int sparc64_setup_msi_irq(unsigned int *irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
......@@ -131,17 +131,17 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
int msi, err;
u32 msiqid;
*virt_irq_p = virt_irq_alloc(0, 0);
*irq_p = irq_alloc(0, 0);
err = -ENOMEM;
if (!*virt_irq_p)
if (!*irq_p)
goto out_err;
set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
set_irq_chip_and_handler_name(*irq_p, &msi_irq,
handle_simple_irq, "MSI");
err = alloc_msi(pbm);
if (unlikely(err < 0))
goto out_virt_irq_free;
goto out_irq_free;
msi = err;
......@@ -152,7 +152,7 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
if (err)
goto out_msi_free;
pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
......@@ -163,24 +163,24 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
}
msg.data = msi;
set_irq_msi(*virt_irq_p, entry);
write_msi_msg(*virt_irq_p, &msg);
set_irq_msi(*irq_p, entry);
write_msi_msg(*irq_p, &msg);
return 0;
out_msi_free:
free_msi(pbm, msi);
out_virt_irq_free:
set_irq_chip(*virt_irq_p, NULL);
virt_irq_free(*virt_irq_p);
*virt_irq_p = 0;
out_irq_free:
set_irq_chip(*irq_p, NULL);
irq_free(*irq_p);
*irq_p = 0;
out_err:
return err;
}
static void sparc64_teardown_msi_irq(unsigned int virt_irq,
static void sparc64_teardown_msi_irq(unsigned int irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
......@@ -189,12 +189,12 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
int i, err;
for (i = 0; i < pbm->msi_num; i++) {
if (pbm->msi_irq_table[i] == virt_irq)
if (pbm->msi_irq_table[i] == irq)
break;
}
if (i >= pbm->msi_num) {
printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
pbm->name, virt_irq);
pbm->name, irq);
return;
}
......@@ -205,14 +205,14 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
if (err) {
printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
"irq %u, gives error %d\n",
pbm->name, msi_num, virt_irq, err);
pbm->name, msi_num, irq, err);
return;
}
free_msi(pbm, msi_num);
set_irq_chip(virt_irq, NULL);
virt_irq_free(virt_irq);
set_irq_chip(irq, NULL);
irq_free(irq);
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
......
......@@ -1313,7 +1313,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
const char *chipset_name;
int is_pbm_a, err;
int err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
......@@ -1343,8 +1343,6 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
*/
regs = of_get_property(dp, "reg", NULL);
is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
......
......@@ -580,7 +580,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
struct iommu *iommu = pbm->iommu;
unsigned long num_tsb_entries, sz, tsbsize;
unsigned long num_tsb_entries, sz;
u32 dma_mask, dma_offset;
const u32 *vdma;
......@@ -596,7 +596,6 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
tsbsize = num_tsb_entries * sizeof(iopte_t);
dma_offset = vdma[0];
......@@ -844,9 +843,9 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
if (!virt_irq)
if (!irq)
return -ENOMEM;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
......@@ -854,7 +853,7 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
return virt_irq;
return irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
......
......@@ -81,7 +81,7 @@ static void n2_pcr_write(u64 val)
unsigned long ret;
ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
if (val != HV_EOK)
if (ret != HV_EOK)
write_pcr(val);
}
......
......@@ -227,7 +227,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
int virt_irq;
int irq;
ino &= 0x3f;
if (ino < SABRE_ONBOARD_IRQ_BASE) {
......@@ -247,7 +247,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
virt_irq = build_irq(inofixup, iclr, imap);
irq = build_irq(inofixup, iclr, imap);
/* If the parent device is a PCI<->PCI bridge other than
* APB, we have to install a pre-handler to ensure that
......@@ -256,13 +256,13 @@ static unsigned int sabre_irq_build(struct device_node *dp,
*/
regs = of_get_property(dp, "reg", NULL);
if (regs && sabre_device_needs_wsync(dp)) {
irq_install_pre_handler(virt_irq,
irq_install_pre_handler(irq,
sabre_wsync_handler,
(void *) (long) regs->phys_hi,
(void *) irq_data);
}
return virt_irq;
return irq;
}
static void __init sabre_irq_trans_init(struct device_node *dp)
......@@ -382,7 +382,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
int ign_fixup;
int virt_irq;
int irq;
int is_tomatillo;
ino &= 0x3f;
......@@ -409,17 +409,17 @@ static unsigned int schizo_irq_build(struct device_node *dp,
ign_fixup = (1 << 6);
}
virt_irq = build_irq(ign_fixup, iclr, imap);
irq = build_irq(ign_fixup, iclr, imap);
if (is_tomatillo) {
irq_install_pre_handler(virt_irq,
irq_install_pre_handler(irq,
tomatillo_wsync_handler,
((irq_data->chip_version <= 4) ?
(void *) 1 : (void *) 0),
(void *) irq_data->sync_reg);
}
return virt_irq;
return irq;
}
static void __init __schizo_irq_trans_init(struct device_node *dp,
......
......@@ -1086,6 +1086,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
#ifdef CONFIG_AUDITSYSCALL
if (unlikely(current->audit_context)) {
unsigned long tstate = regs->tstate;
int result = AUDITSC_SUCCESS;
......@@ -1095,7 +1096,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_G1]);
......
......@@ -184,7 +184,6 @@ static void __init boot_flags_init(char *commands)
*/
extern void sun4c_probe_vac(void);
extern char cputypval;
extern unsigned short root_flags;
extern unsigned short root_dev;
......@@ -218,21 +217,21 @@ void __init setup_arch(char **cmdline_p)
/* Set sparc_cpu_model */
sparc_cpu_model = sun_unknown;
if (!strcmp(&cputypval,"sun4 "))
if (!strcmp(&cputypval[0], "sun4 "))
sparc_cpu_model = sun4;
if (!strcmp(&cputypval,"sun4c"))
if (!strcmp(&cputypval[0], "sun4c"))
sparc_cpu_model = sun4c;
if (!strcmp(&cputypval,"sun4m"))
if (!strcmp(&cputypval[0], "sun4m"))
sparc_cpu_model = sun4m;
if (!strcmp(&cputypval,"sun4s"))
if (!strcmp(&cputypval[0], "sun4s"))
sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
if (!strcmp(&cputypval,"sun4d"))
if (!strcmp(&cputypval[0], "sun4d"))
sparc_cpu_model = sun4d;
if (!strcmp(&cputypval,"sun4e"))
if (!strcmp(&cputypval[0], "sun4e"))
sparc_cpu_model = sun4e;
if (!strcmp(&cputypval,"sun4u"))
if (!strcmp(&cputypval[0], "sun4u"))
sparc_cpu_model = sun4u;
if (!strncmp(&cputypval, "leon" , 4))
if (!strncmp(&cputypval[0], "leon" , 4))
sparc_cpu_model = sparc_leon;
printk("ARCH: ");
......@@ -335,7 +334,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
prom_rev,
romvec->pv_printrev >> 16,
romvec->pv_printrev & 0xffff,
&cputypval,
&cputypval[0],
ncpus_probed,
num_online_cpus()
#ifndef CONFIG_SMP
......
......@@ -189,7 +189,7 @@ static inline long get_delta (long *rt, long *master)
void smp_synchronize_tick_client(void)
{
long i, delta, adj, adjust_latency = 0, done = 0;
unsigned long flags, rt, master_time_stamp, bound;
unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
struct {
long rt; /* roundtrip time */
......@@ -208,10 +208,8 @@ void smp_synchronize_tick_client(void)
{
for (i = 0; i < NUM_ROUNDS; i++) {
delta = get_delta(&rt, &master_time_stamp);
if (delta == 0) {
if (delta == 0)
done = 1; /* let's lock on to this... */
bound = rt;
}
if (!done) {
if (i > 0) {
......@@ -933,13 +931,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
void *pg_addr;
int this_cpu;
u64 data0;
if (tlb_type == hypervisor)
return;
this_cpu = get_cpu();
preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
......@@ -964,7 +961,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
}
__local_flush_dcache_page(page);
put_cpu();
preempt_enable();
}
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
......
/* sun4c_irq.c
* arch/sparc/kernel/sun4c_irq.c:
/*
* sun4c irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
......@@ -9,31 +9,41 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "irq.h"
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/idprom.h>
#include <asm/machines.h>
#include "irq.h"
/* Sun4c interrupts are typically laid out as follows:
*
* 1 - Software interrupt, SBUS level 1
* 2 - SBUS level 2
* 3 - ESP SCSI, SBUS level 3
* 4 - Software interrupt
* 5 - Lance ethernet, SBUS level 4
* 6 - Software interrupt
* 7 - Graphics card, SBUS level 5
* 8 - SBUS level 6
* 9 - SBUS level 7
* 10 - Counter timer
* 11 - Floppy
* 12 - Zilog uart
* 13 - CS4231 audio
* 14 - Profiling timer
* 15 - NMI
*
* The interrupt enable bits in the interrupt mask register are
* really only used to enable/disable the timer interrupts, and
* for signalling software interrupts. There is also a master
* interrupt enable bit in this register.
*
* Interrupts are enabled by setting the SUN4C_INT_* bits; they
* are disabled by clearing those bits.
*/
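A minimal sketch of the read-modify-write pattern the comment describes (illustrative only; the real sun4c_enable_irq/sun4c_disable_irq below do the same under local_irq_save() with a switch over the supported IRQ numbers):

/* Illustrative only: toggle one SUN4C_INT_* enable bit in the
 * memory-mapped interrupt enable byte (sbus_readb/sbus_writeb
 * come from <asm/io.h>). */
static void sun4c_set_enable_bit_sketch(unsigned char __iomem *ie,
					unsigned char bit, int enable)
{
	unsigned char cur = sbus_readb(ie);

	if (enable)
		sbus_writeb(cur | bit, ie);	/* set bit -> interrupt enabled */
	else
		sbus_writeb(cur & ~bit, ie);	/* clear bit -> interrupt disabled */
}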
/*
* Bit field defines for the interrupt registers on various
......@@ -49,16 +59,11 @@
#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
/* Pointer to the interrupt enable byte
*
* Dave Redman (djhr@tadpole.co.uk)
* What you may not be aware of is that entry.S requires this variable.
*
* --- linux_trap_nmi_sun4c --
*
* so don't go making it static, like I tried. sigh.
/*
* Pointer to the interrupt enable byte
* Used by entry.S
*/
unsigned char __iomem *interrupt_enable = NULL;
unsigned char __iomem *interrupt_enable;
static void sun4c_disable_irq(unsigned int irq_nr)
{
......@@ -68,7 +73,7 @@ static void sun4c_disable_irq(unsigned int irq_nr)
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = sbus_readb(interrupt_enable);
switch(irq_nr) {
switch (irq_nr) {
case 1:
new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
break;
......@@ -97,7 +102,7 @@ static void sun4c_enable_irq(unsigned int irq_nr)
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = sbus_readb(interrupt_enable);
switch(irq_nr) {
switch (irq_nr) {
case 1:
new_mask = ((current_mask) | SUN4C_INT_E1);
break;
......@@ -185,7 +190,9 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
}
#ifdef CONFIG_SMP
static void sun4c_nop(void) {}
static void sun4c_nop(void)
{
}
#endif
void __init sun4c_init_IRQ(void)
......@@ -214,7 +221,9 @@ void __init sun4c_init_IRQ(void)
BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
sparc_init_timers = sun4c_init_timers;
sparc_irq_config.init_timers = sun4c_init_timers;
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
......
/*
* arch/sparc/kernel/sun4d_irq.c:
* SS1000/SC2000 interrupt handling.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Heavily based on arch/sparc/kernel/irq.c.
*/
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include "kernel.h"
#include "irq.h"
/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */
/* Sun4d interrupts fall roughly into two categories. SBUS and
* cpu local. CPU local interrupts cover the timer interrupts
* and whatnot, and we encode those as normal PILs between
* 0 and 15.
*
* SBUS interrupts are encoded integers including the board number
* (plus one), the SBUS level, and the SBUS slot number. Sun4D
* IRQ dispatch is done by:
*
* 1) Reading the BW local interrupt table in order to get the bus
* interrupt mask.
*
* This table is indexed by SBUS interrupt level which can be
* derived from the PIL we got interrupted on.
*
* 2) For each bus showing interrupt pending from #1, read the
* SBI interrupt state register. This will indicate which slots
* have interrupts pending for that SBUS interrupt level.
*/
struct sun4d_timer_regs {
u32 l10_timer_limit;
......@@ -59,11 +51,9 @@ static struct sun4d_timer_regs __iomem *sun4d_timers;
#define TIMER_IRQ 10
#define MAX_STATIC_ALLOC 4
extern int static_irq_count;
static unsigned char sbus_tid[32];
static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;
static struct sbus_action {
struct irqaction *action;
......@@ -71,11 +61,33 @@ static struct sbus_action {
} *sbus_actions;
static int pil_to_sbus[] = {
0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
0,
0,
1,
2,
0,
3,
0,
4,
0,
5,
0,
6,
0,
7,
0,
0,
};
static int sbus_to_pil[] = {
0, 2, 3, 5, 7, 9, 11, 13,
0,
2,
3,
5,
7,
9,
11,
13,
};
static int nsbi;
......@@ -86,7 +98,7 @@ DEFINE_SPINLOCK(sun4d_imsk_lock);
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j = 0, k = 0, sbusl;
struct irqaction * action;
struct irqaction *action;
unsigned long flags;
#ifdef CONFIG_SMP
int x;
......@@ -102,7 +114,8 @@ int show_sun4d_interrupts(struct seq_file *p, void *v)
} else {
for (j = 0; j < nsbi; j++) {
for (k = 0; k < 4; k++)
if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
if (action)
goto found_it;
}
goto out_unlock;
......@@ -125,15 +138,17 @@ found_it: seq_printf(p, "%3d: ", i);
(action->flags & IRQF_DISABLED) ? " +" : "",
action->name);
}
if (!sbusl) break;
if (!sbusl)
break;
k++;
if (k < 4)
action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
else {
if (k < 4) {
action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
} else {
j++;
if (j == nsbi) break;
if (j == nsbi)
break;
k = 0;
action = sbus_actions [(j << 5) + (sbusl << 2)].action;
action = sbus_actions[(j << 5) + (sbusl << 2)].action;
}
}
seq_putc(p, '\n');
......@@ -156,7 +171,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
actionp = &(sbus_actions[irq - (1 << 5)].action);
action = *actionp;
if (!action) {
printk("Trying to free free IRQ%d\n",irq);
printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
goto out_unlock;
}
if (dev_id) {
......@@ -166,19 +181,21 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
tmp = action;
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
printk(KERN_ERR "Trying to free free shared IRQ%d\n",
irq);
goto out_unlock;
}
} else if (action->flags & IRQF_SHARED) {
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
irq);
goto out_unlock;
}
if (action->flags & SA_STATIC_ALLOC)
{
/* This interrupt is marked as specially allocated
if (action->flags & SA_STATIC_ALLOC) {
/*
* This interrupt is marked as specially allocated
* so it is a bad idea to free it.
*/
printk("Attempt to free statically allocated IRQ%d (%s)\n",
printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
goto out_unlock;
}
......@@ -203,30 +220,28 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
spin_unlock_irqrestore(&irq_action_lock, flags);
}
extern void unexpected_irq(int, void *, struct pt_regs *);
void sun4d_handler_irq(int irq, struct pt_regs * regs)
void sun4d_handler_irq(int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
struct irqaction * action;
struct irqaction *action;
int cpu = smp_processor_id();
/* SBUS IRQ level (1 - 7) */
int sbusl = pil_to_sbus[irq];
int sbusl = pil_to_sbus[pil];
/* FIXME: Is this necessary?? */
cc_get_ipen();
cc_set_iclr(1 << irq);
cc_set_iclr(1 << pil);
old_regs = set_irq_regs(regs);
irq_enter();
kstat_cpu(cpu).irqs[irq]++;
kstat_cpu(cpu).irqs[pil]++;
if (!sbusl) {
action = *(irq + irq_action);
action = *(pil + irq_action);
if (!action)
unexpected_irq(irq, NULL, regs);
unexpected_irq(pil, NULL, regs);
do {
action->handler(irq, action->dev_id);
action->handler(pil, action->dev_id);
action = action->next;
} while (action);
} else {
......@@ -251,9 +266,9 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
action = actionp->action;
if (!action)
unexpected_irq(irq, NULL, regs);
unexpected_irq(pil, NULL, regs);
do {
action->handler(irq, action->dev_id);
action->handler(pil, action->dev_id);
action = action->next;
} while (action);
release_sbi(SBI2DEVID(sbino), slot);
......@@ -266,13 +281,13 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
int sun4d_request_irq(unsigned int irq,
irq_handler_t handler,
unsigned long irqflags, const char * devname, void *dev_id)
unsigned long irqflags, const char *devname, void *dev_id)
{
struct irqaction *action, *tmp = NULL, **actionp;
unsigned long flags;
int ret;
if(irq > 14 && irq < (1 << 5)) {
if (irq > 14 && irq < (1 << 5)) {
ret = -EINVAL;
goto out;
}
......@@ -292,13 +307,15 @@ int sun4d_request_irq(unsigned int irq,
if (action) {
if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
for (tmp = action; tmp->next; tmp = tmp->next);
for (tmp = action; tmp->next; tmp = tmp->next)
/* find last entry - tmp used below */;
} else {
ret = -EBUSY;
goto out_unlock;
}
if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
irq);
ret = -EBUSY;
goto out_unlock;
}
......@@ -312,12 +329,12 @@ int sun4d_request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
irq, devname);
}
if (action == NULL)
action = kmalloc(sizeof(struct irqaction),
GFP_ATOMIC);
action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
if (!action) {
ret = -ENOMEM;
......@@ -389,44 +406,6 @@ void __init sun4d_distribute_irqs(void)
{
struct device_node *dp;
#ifdef DISTRIBUTE_IRQS
cpumask_t sbus_serving_map;
sbus_serving_map = cpu_present_map;
for_each_node_by_name(dp, "sbi") {
int board = of_getintprop_default(dp, "board#", 0);
if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
sbus_tid[board] = (board * 2 + 1);
else if (cpu_isset(board * 2, cpu_present_map))
sbus_tid[board] = (board * 2);
else if (cpu_isset(board * 2 + 1, cpu_present_map))
sbus_tid[board] = (board * 2 + 1);
else
sbus_tid[board] = 0xff;
if (sbus_tid[board] != 0xff)
cpu_clear(sbus_tid[board], sbus_serving_map);
}
for_each_node_by_name(dp, "sbi") {
int board = of_getintprop_default(dp, "board#", 0);
if (sbus_tid[board] == 0xff) {
int i = 31;
if (cpus_empty(sbus_serving_map))
sbus_serving_map = cpu_present_map;
while (cpu_isset(i, sbus_serving_map))
i--;
sbus_tid[board] = i;
cpu_clear(i, sbus_serving_map);
}
}
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
set_sbi_tid(devid, sbus_tid[board] << 3);
}
#else
int cpuid = cpu_logical_map(1);
if (cpuid == -1)
......@@ -437,8 +416,7 @@ void __init sun4d_distribute_irqs(void)
sbus_tid[board] = cpuid;
set_sbi_tid(devid, cpuid << 3);
}
printk("All sbus IRQs directed to CPU%d\n", cpuid);
#endif
printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid);
}
#endif
......@@ -462,14 +440,61 @@ static void __init sun4d_load_profile_irqs(void)
}
}
unsigned int sun4d_build_device_irq(struct platform_device *op,
unsigned int real_irq)
{
static int pil_to_sbus[] = {
0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};
struct device_node *dp = op->dev.of_node;
struct device_node *io_unit, *sbi = dp->parent;
const struct linux_prom_registers *regs;
int board, slot;
int sbusl;
while (sbi) {
if (!strcmp(sbi->name, "sbi"))
break;
sbi = sbi->parent;
}
if (!sbi)
goto err_out;
regs = of_get_property(dp, "reg", NULL);
if (!regs)
goto err_out;
slot = regs->which_io;
/*
* If SBI's parent is not io-unit or the io-unit lacks
* a "board#" property, something is very wrong.
*/
if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
printk("%s: Error, parent is not io-unit.\n", sbi->full_name);
goto err_out;
}
io_unit = sbi->parent;
board = of_getintprop_default(io_unit, "board#", -1);
if (board == -1) {
printk("%s: Error, lacks board# property.\n", io_unit->full_name);
goto err_out;
}
sbusl = pil_to_sbus[real_irq];
if (sbusl)
return (((board + 1) << 5) + (sbusl << 2) + slot);
err_out:
return real_irq;
}
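Worked example of the encoding above (board and slot values are hypothetical): a device in SBUS slot 1 behind the SBI on board 2, interrupting at PIL 5, has sbusl = pil_to_sbus[5] = 3, so the function returns ((2 + 1) << 5) + (3 << 2) + 1 = 96 + 12 + 1 = 109; for a CPU-local PIL (sbusl == 0) it falls through to err_out and real_irq is returned unchanged.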
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
unsigned long flags;
extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
extern unsigned int real_irq_entry[], smp4d_ticker[];
extern unsigned int patchme_maybe_smp_msg[];
/* Adjust so that we jump directly to smp4d_ticker */
lvl14_save[2] += smp4d_ticker - real_irq_entry;
......@@ -531,7 +556,8 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
(IRQF_DISABLED | SA_STATIC_ALLOC),
"timer", NULL);
if (err) {
prom_printf("sun4d_init_timers: request_irq() failed with %d\n", err);
prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
err);
prom_halt();
}
sun4d_load_profile_irqs();
......@@ -550,7 +576,7 @@ void __init sun4d_init_sbi_irq(void)
nsbi = 0;
for_each_node_by_name(dp, "sbi")
nsbi++;
sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
if (!sbus_actions) {
prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
prom_halt();
......@@ -566,7 +592,8 @@ void __init sun4d_init_sbi_irq(void)
/* Get rid of pending irqs from PROM */
mask = acquire_sbi(devid, 0xffffffff);
if (mask) {
printk ("Clearing pending IRQs %08x on SBI %d\n", mask, board);
printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n",
mask, board);
release_sbi(devid, mask);
}
}
......@@ -580,7 +607,10 @@ void __init sun4d_init_IRQ(void)
BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
sparc_init_timers = sun4d_init_timers;
sparc_irq_config.init_timers = sun4d_init_timers;
sparc_irq_config.build_device_irq = sun4d_build_device_irq;
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
......
/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
/* Sparc SS1000/SC2000 SMP support.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
......@@ -6,59 +6,23 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/head.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq_regs.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/sbi.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpudata.h>
#include "kernel.h"
#include "irq.h"
#define IRQ_CROSS_CALL 15
extern ctxd_t *srmmu_ctx_table_phys;
#define IRQ_CROSS_CALL 15
static volatile int smp_processors_ready = 0;
static volatile int smp_processors_ready;
static int smp_highest_cpu;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned char boot_cpu_id;
extern volatile int smp_process_available;
extern cpumask_t smp_commenced_mask;
extern int __smp4d_processor_id(void);
/* #define SMP_DEBUG */
#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
......@@ -69,8 +33,6 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon
}
static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);
extern void sun4d_distribute_irqs(void);
static unsigned char cpu_leds[32];
......@@ -86,7 +48,6 @@ static inline void show_leds(int cpuid)
void __cpuinit smp4d_callin(void)
{
int cpuid = hard_smp4d_processor_id();
extern spinlock_t sun4d_imsk_lock;
unsigned long flags;
/* Show we are alive */
......@@ -121,10 +82,10 @@ void __cpuinit smp4d_callin(void)
cpu_probe();
while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
barrier();
while(current_set[cpuid]->cpu != cpuid)
while (current_set[cpuid]->cpu != cpuid)
barrier();
/* Fix idle thread fields. */
......@@ -154,15 +115,9 @@ void __cpuinit smp4d_callin(void)
}
extern void init_IRQ(void);
extern void cpu_panic(void);
/*
* Cycle through the processors asking the PROM to start each one.
*/
extern struct linux_prom_registers smp_penguin_ctable;
void __init smp4d_boot_cpus(void)
{
if (boot_cpu_id)
......@@ -173,13 +128,12 @@ void __init smp4d_boot_cpus(void)
int __cpuinit smp4d_boot_one_cpu(int i)
{
extern unsigned long sun4d_cpu_startup;
unsigned long *entry = &sun4d_cpu_startup;
struct task_struct *p;
int timeout;
int cpu_node;
cpu_find_by_instance(i, &cpu_node,NULL);
cpu_find_by_instance(i, &cpu_node, NULL);
/* Cook up an idler for this guy. */
p = fork_idle(i);
current_set[i] = task_thread_info(p);
......@@ -194,22 +148,22 @@ int __cpuinit smp4d_boot_one_cpu(int i)
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
SMP_PRINTK(("Starting CPU %d at %p\n", i, entry));
printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_flush_cache_all();
prom_startcpu(cpu_node,
&smp_penguin_ctable, 0, (char *)entry);
SMP_PRINTK(("prom_startcpu returned :)\n"));
printk(KERN_INFO "prom_startcpu returned :)\n");
/* wheee... it's going... */
for(timeout = 0; timeout < 10000; timeout++) {
if(cpu_callin_map[i])
for (timeout = 0; timeout < 10000; timeout++) {
if (cpu_callin_map[i])
break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
printk("Processor %d is stuck.\n", i);
printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
......@@ -255,14 +209,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
if(smp_processors_ready) {
if (smp_processors_ready) {
register int high = smp_highest_cpu;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
{
/* If you make changes here, make sure gcc generates proper code... */
/*
* If you make changes here, make sure
* gcc generates proper code...
*/
register smpfunc_t f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
......@@ -284,7 +241,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpu_clear(smp_processor_id(), mask);
cpus_and(mask, cpu_online_map, mask);
for(i = 0; i <= high; i++) {
for (i = 0; i <= high; i++) {
if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
......@@ -300,17 +257,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
do {
if (!cpu_isset(i, mask))
continue;
while(!ccall_info.processors_in[i])
while (!ccall_info.processors_in[i])
barrier();
} while(++i <= high);
} while (++i <= high);
i = 0;
do {
if (!cpu_isset(i, mask))
continue;
while(!ccall_info.processors_out[i])
while (!ccall_info.processors_out[i])
barrier();
} while(++i <= high);
} while (++i <= high);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
......@@ -349,7 +306,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
if(!--prof_counter(cpu)) {
if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
......@@ -361,8 +318,6 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
extern unsigned int lvl14_resolution;
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = hard_smp4d_processor_id();
......@@ -392,7 +347,6 @@ void __init smp4d_blackbox_current(unsigned *addr)
void __init sun4d_init_smp(void)
{
int i;
extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
......
/* sun4m_irq.c
* arch/sparc/kernel/sun4m_irq.c:
/*
* sun4m irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
......@@ -9,97 +9,40 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include "irq.h"
#include "kernel.h"
struct sun4m_irq_percpu {
u32 pending;
u32 clear;
u32 set;
};
struct sun4m_irq_global {
u32 pending;
u32 mask;
u32 mask_clear;
u32 mask_set;
u32 interrupt_target;
};
/* Code in entry.S needs to get at these register mappings. */
struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
struct sun4m_irq_global __iomem *sun4m_irq_global;
/* Dave Redman (djhr@tadpole.co.uk)
* The sun4m interrupt registers.
*/
#define SUN4M_INT_ENABLE 0x80000000
#define SUN4M_INT_E14 0x00000080
#define SUN4M_INT_E10 0x00080000
#define SUN4M_HARD_INT(x) (0x000000001 << (x))
#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
SUN4M_INT_M2S_WRITE_ERR | \
SUN4M_INT_ECC_ERR | \
SUN4M_INT_VME_ERR)
#define SUN4M_INT_SBUS(x) (1 << (x+7))
#define SUN4M_INT_VME(x) (1 << (x))
/* Interrupt levels used by OBP */
#define OBP_INT_LEVEL_SOFT 0x10
#define OBP_INT_LEVEL_ONBOARD 0x20
#define OBP_INT_LEVEL_SBUS 0x30
#define OBP_INT_LEVEL_VME 0x40
/* Interrupt level assignment on sun4m:
/* Sample sun4m IRQ layout:
*
* 0x22 - Power
* 0x24 - ESP SCSI
* 0x26 - Lance ethernet
* 0x2b - Floppy
* 0x2c - Zilog uart
* 0x32 - SBUS level 0
* 0x33 - Parallel port, SBUS level 1
* 0x35 - SBUS level 2
* 0x37 - SBUS level 3
* 0x39 - Audio, Graphics card, SBUS level 4
* 0x3b - SBUS level 5
* 0x3d - SBUS level 6
*
* Each interrupt source has a mask bit in the interrupt registers.
* When the mask bit is set, this blocks interrupt delivery. So you
* clear the bit to enable the interrupt.
*
* Interrupts numbered less than 0x10 are software triggered interrupts
* and unused by Linux.
*
* Interrupt level assignment on sun4m:
*
* level source
* ------------------------------------------------------------
......@@ -152,8 +95,57 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
* power: 0x22 onboard power device (XXX unknown mask bit XXX)
*/
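A hedged sketch of the mask-bit handling the comment describes, using the global register layout declared just below (illustrative only; the file's real helpers also distinguish per-CPU from global sources and derive the mask via sun4m_get_irqmask()):

/* Illustrative only: enable/disable one interrupt source by its mask
 * bit; sbus_writel() comes from <asm/io.h>. */
static void sun4m_mask_bit_sketch(u32 mask, int enable)
{
	if (enable)
		sbus_writel(mask, &sun4m_irq_global->mask_clear); /* clear -> delivered */
	else
		sbus_writel(mask, &sun4m_irq_global->mask_set);   /* set -> blocked */
}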
/* Code in entry.S needs to get at these register mappings. */
struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
struct sun4m_irq_global __iomem *sun4m_irq_global;
/* Dave Redman (djhr@tadpole.co.uk)
* The sun4m interrupt registers.
*/
#define SUN4M_INT_ENABLE 0x80000000
#define SUN4M_INT_E14 0x00000080
#define SUN4M_INT_E10 0x00080000
#define SUN4M_HARD_INT(x) (0x000000001 << (x))
#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
SUN4M_INT_M2S_WRITE_ERR | \
SUN4M_INT_ECC_ERR | \
SUN4M_INT_VME_ERR)
#define SUN4M_INT_SBUS(x) (1 << (x+7))
#define SUN4M_INT_VME(x) (1 << (x))
/* Interrupt levels used by OBP */
#define OBP_INT_LEVEL_SOFT 0x10
#define OBP_INT_LEVEL_ONBOARD 0x20
#define OBP_INT_LEVEL_SBUS 0x30
#define OBP_INT_LEVEL_VME 0x40
#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
#define SUM4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14)
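Worked from the defines above: SUN4M_TIMER_IRQ = OBP_INT_LEVEL_ONBOARD | 10 = 0x20 | 0x0a = 0x2a, which lands in the "0x20 - onboard" block of the irq_mask[] table below at slot 10, i.e. SUN4M_INT_REALTIME, the "system timer" source in the layout comment.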
static unsigned long irq_mask[0x50] = {
/* SMP */
/* 0x00 - SMP */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
......@@ -162,7 +154,7 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
/* soft */
/* 0x10 - soft */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
......@@ -171,19 +163,19 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
/* onboard */
/* 0x20 - onboard */
0, 0, 0, 0,
SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0,
SUN4M_INT_VIDEO, SUN4M_INT_MODULE,
SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY,
(SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),
SUN4M_INT_AUDIO, 0, SUN4M_INT_MODULE_ERR,
/* sbus */
/* 0x30 - sbus */
0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1),
0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3),
0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5),
0, SUN4M_INT_SBUS(6), 0, 0,
/* vme */
/* 0x40 - vme */
0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1),
0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3),
0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5),
......@@ -260,7 +252,7 @@ static unsigned long cpu_pil_to_imask[16] = {
/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
/*13*/ SUN4M_INT_SBUS(6) | SUN4M_INT_VME(6) | SUN4M_INT_AUDIO,
/*14*/ SUN4M_INT_E14,
/*15*/ SUN4M_INT_ERROR
/*15*/ SUN4M_INT_ERROR,
};
/* We assume the caller has disabled local interrupts when these are called,
......@@ -280,12 +272,14 @@ static void sun4m_enable_pil_irq(unsigned int pil)
static void sun4m_send_ipi(int cpu, int level)
{
unsigned long mask = sun4m_get_irqmask(level);
sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
}
static void sun4m_clear_ipi(int cpu, int level)
{
unsigned long mask = sun4m_get_irqmask(level);
sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
}
......@@ -314,7 +308,6 @@ struct sun4m_timer_global {
static struct sun4m_timer_global __iomem *timers_global;
#define TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
......@@ -391,7 +384,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
master_l10_counter = &timers_global->l10_count;
err = request_irq(TIMER_IRQ, counter_fn,
err = request_irq(SUN4M_TIMER_IRQ, counter_fn,
(IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
if (err) {
printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
......@@ -407,7 +400,6 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
#ifdef CONFIG_SMP
{
unsigned long flags;
extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
/* For SMP we use the level 14 ticker, however the bootup code
......@@ -466,7 +458,9 @@ void __init sun4m_init_IRQ(void)
BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
sparc_init_timers = sun4m_init_timers;
sparc_irq_config.init_timers = sun4m_init_timers;
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
......
/* sun4m_smp.c: Sparc SUN4M SMP support.
/*
* sun4m SMP support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/head.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>
#include "irq.h"
#include "kernel.h"
#define IRQ_CROSS_CALL 15
extern ctxd_t *srmmu_ctx_table_phys;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
extern cpumask_t smp_commenced_mask;
extern int __smp4m_processor_id(void);
/*#define SMP_DEBUG*/
#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
......@@ -64,7 +27,6 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
}
static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);
void __cpuinit smp4m_callin(void)
{
......@@ -119,9 +81,6 @@ void __cpuinit smp4m_callin(void)
/*
* Cycle through the processors asking the PROM to start each one.
*/
extern struct linux_prom_registers smp_penguin_ctable;
void __init smp4m_boot_cpus(void)
{
smp_setup_percpu_timer();
......@@ -130,7 +89,6 @@ void __init smp4m_boot_cpus(void)
int __cpuinit smp4m_boot_one_cpu(int i)
{
extern unsigned long sun4m_cpu_startup;
unsigned long *entry = &sun4m_cpu_startup;
struct task_struct *p;
int timeout;
......@@ -142,7 +100,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
p = fork_idle(i);
current_set[i] = task_thread_info(p);
/* See trampoline.S for details... */
entry += ((i-1) * 3);
entry += ((i - 1) * 3);
/*
* Initialize the contexts table
......@@ -154,20 +112,19 @@ int __cpuinit smp4m_boot_one_cpu(int i)
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
printk("Starting CPU %d at %p\n", i, entry);
printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_flush_cache_all();
prom_startcpu(cpu_node,
&smp_penguin_ctable, 0, (char *)entry);
prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */
for(timeout = 0; timeout < 10000; timeout++) {
if(cpu_callin_map[i])
for (timeout = 0; timeout < 10000; timeout++) {
if (cpu_callin_map[i])
break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
printk("Processor %d is stuck.\n", i);
printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
......@@ -202,6 +159,7 @@ void __init smp4m_smp_done(void)
void smp4m_irq_rotate(int cpu)
{
int next = cpu_data(cpu).next;
if (next != cpu)
set_irq_udt(next);
}
......@@ -243,7 +201,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpu_clear(smp_processor_id(), mask);
cpus_and(mask, cpu_online_map, mask);
for(i = 0; i < ncpus; i++) {
for (i = 0; i < ncpus; i++) {
if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
......@@ -262,19 +220,18 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
do {
if (!cpu_isset(i, mask))
continue;
while(!ccall_info.processors_in[i])
while (!ccall_info.processors_in[i])
barrier();
} while(++i < ncpus);
} while (++i < ncpus);
i = 0;
do {
if (!cpu_isset(i, mask))
continue;
while(!ccall_info.processors_out[i])
while (!ccall_info.processors_out[i])
barrier();
} while(++i < ncpus);
} while (++i < ncpus);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
}
......@@ -289,8 +246,6 @@ void smp4m_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
extern void sun4m_clear_profile_irq(int cpu);
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
......@@ -302,7 +257,7 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
if(!--prof_counter(cpu)) {
if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
......@@ -314,8 +269,6 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
extern unsigned int lvl14_resolution;
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = smp_processor_id();
......@@ -323,7 +276,7 @@ static void __cpuinit smp_setup_percpu_timer(void)
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
if(cpu == boot_cpu_id)
if (cpu == boot_cpu_id)
enable_pil_irq(14);
}
......
......@@ -360,20 +360,25 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
}
EXPORT_SYMBOL(get_fb_unmapped_area);
/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
/* Essentially the same as PowerPC. */
static unsigned long mmap_rnd(void)
{
unsigned long random_factor = 0UL;
unsigned long gap;
unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
random_factor = get_random_int();
unsigned long val = get_random_int();
if (test_thread_flag(TIF_32BIT))
random_factor &= ((1 * 1024 * 1024) - 1);
rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
else
random_factor = ((random_factor << PAGE_SHIFT) &
0xffffffffUL);
rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
}
return (rnd << PAGE_SHIFT) * 2;
}
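Worked example, assuming sparc64's usual 8 KB pages (PAGE_SHIFT = 13): a 32-bit task draws rnd from [0, 2^(22-13)) = [0, 512), so (rnd << PAGE_SHIFT) * 2 randomizes the mmap base over at most 8 MB; a 64-bit task draws from [0, 2^(29-13)) = [0, 65536), giving at most 1 GB of randomization.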
void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = mmap_rnd();
unsigned long gap;
/*
* Fall back to the standard layout if the personality
......
/* tick14.c
*
* Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
*
* This file handles the Sparc specific level 14 ticker.
* This is really useful for profiling; OBP uses it for keyboard
* aborts and other stuff.
*/
#include <linux/kernel.h>
extern unsigned long lvl14_save[5];
static unsigned long *linux_lvl14 = NULL;
static unsigned long obp_lvl14[4];
/*
* Call with timer IRQ closed.
* First time we do it with disable_irq, later prom code uses spin_lock_irq().
*/
void install_linux_ticker(void)
{
if (!linux_lvl14)
return;
linux_lvl14[0] = lvl14_save[0];
linux_lvl14[1] = lvl14_save[1];
linux_lvl14[2] = lvl14_save[2];
linux_lvl14[3] = lvl14_save[3];
}
void install_obp_ticker(void)
{
if (!linux_lvl14)
return;
linux_lvl14[0] = obp_lvl14[0];
linux_lvl14[1] = obp_lvl14[1];
linux_lvl14[2] = obp_lvl14[2];
linux_lvl14[3] = obp_lvl14[3];
}
......@@ -219,7 +219,7 @@ static void __init sbus_time_init(void)
btfixup();
sparc_init_timers(timer_interrupt);
sparc_irq_config.init_timers(timer_interrupt);
}
void __init time_init(void)
......
......@@ -816,14 +816,12 @@ void __init time_init(void)
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
clocksource_tick.read = clocksource_tick_read;
clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
clocksource_register(&clocksource_tick);
sparc64_clockevent.name = tick_ops->name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
......
......@@ -2152,7 +2152,7 @@ static void user_instruction_dump(unsigned int __user *pc)
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
unsigned long fp, thread_base, ksp;
unsigned long fp, ksp;
struct thread_info *tp;
int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
......@@ -2173,7 +2173,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
flushw_all();
fp = ksp + STACK_BIAS;
thread_base = (unsigned long) tp;
printk("Call Trace:\n");
do {
......
......@@ -127,7 +127,7 @@ do_int_load:
wr %o5, 0x0, %asi
retl
mov 0, %o0
.size __do_int_load, .-__do_int_load
.size do_int_load, .-do_int_load
.section __ex_table,"a"
.word 4b, __retl_efault
......
......@@ -240,11 +240,10 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* only copy the information from the master page table,
* nothing more.
*/
code = SEGV_MAPERR;
if (!ARCH_SUN4C && address >= TASK_SIZE)
goto vmalloc_fault;
code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
......
......@@ -54,15 +54,11 @@ EXPORT_SYMBOL(prom_feval);
void
prom_cmdline(void)
{
extern void install_obp_ticker(void);
extern void install_linux_ticker(void);
unsigned long flags;
spin_lock_irqsave(&prom_lock, flags);
install_obp_ticker();
(*(romvec->pv_abort))();
restore_current();
install_linux_ticker();
spin_unlock_irqrestore(&prom_lock, flags);
set_auxio(AUXIO_LED, 0);
}
......