Commit 99bc4706 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6: (21 commits)
  sparc64: Initial niagara2 perf counter support.
  sparc64: Perf counter 'nop' event is not constant.
  sparc64: Provide a way to specify a perf counter overflow IRQ enable bit.
  sparc64: Provide hypervisor tracing bit support for perf counters.
  sparc64: Initial hw perf counter support.
  sparc64: Implement a real set_perf_counter_pending().
  sparc64: Use nmi_enter() and nmi_exit(), as needed.
  sparc64: Provide extern decls for sparc_??u_type strings.
  sparc64: Make touch_nmi_watchdog() actually work.
  sparc64: Kill unnecessary cast in profile_timer_exceptions_notify().
  sparc64: Manage NMI watchdog enabling like x86.
  sparc: add basic support for 'perf'
  sparc: convert /proc/io_map, /proc/dvma_map to seq_file
  sparc, leon: sparc-leon specific SRMMU initialization and bootup fixes.
  sparc,leon: Added support for AMBAPP bus.
  sparc,leon: Introduce the sparc-leon CPU type.
  sparc,leon: Redefine MMU register access asi if CONFIG_LEON
  sparc,leon: CONFIG_SPARC_LEON option and leon specific files.
  sparc64: cheaper asm/uaccess.h inclusion
  SPARC: fix duplicate declaration
  ...
parents d7e9660a cabc5c0f
@@ -25,6 +25,7 @@ config SPARC
select ARCH_WANT_OPTIONAL_GPIOLIB
select RTC_CLASS
select RTC_DRV_M48T59
select HAVE_PERF_COUNTERS
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
@@ -46,6 +47,7 @@ config SPARC64
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
select HAVE_PERF_COUNTERS
config ARCH_DEFCONFIG
string
@@ -439,6 +441,17 @@ config SERIAL_CONSOLE
If unsure, say N.
config SPARC_LEON
bool "Sparc Leon processor family"
depends on SPARC32
---help---
Say Y here if you are running on a SPARC-LEON processor.
The LEON processor is a synthesizable VHDL model of the
SPARC-v8 standard. LEON is part of the GRLIB collection of
IP cores that are distributed under GPL. GRLIB can be downloaded
from www.gaisler.com. You can download a sparc-linux cross-compilation
toolchain at www.gaisler.com.
endmenu
menu "Bus options (PCI etc.)"
...
@@ -38,10 +38,6 @@ CPPFLAGS_vmlinux.lds += -m32
# Actual linking is done with "make image".
LDFLAGS_vmlinux = -r
# Default target
all: zImage
else
#####
# sparc64
@@ -91,6 +87,9 @@ endif
boot := arch/sparc/boot
# Default target
all: zImage
image zImage tftpboot.img vmlinux.aout: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@@ -109,8 +108,9 @@ define archhelp
endef
else
define archhelp
echo '* vmlinux - standard sparc64 kernel'
echo '* zImage - stripped and compressed sparc64 kernel ($(boot)/zImage)'
echo ' vmlinux.aout - a.out kernel for sparc64'
echo ' tftpboot.img - image prepared for tftp'
endef
endif
@@ -79,6 +79,9 @@ $(obj)/image: vmlinux FORCE
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
$(obj)/zImage: $(obj)/image
$(call if_changed,gzip)
$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
...
@@ -40,7 +40,11 @@
#define ASI_M_UNA01 0x01 /* Same here... */
#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
#ifndef CONFIG_SPARC_LEON
#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
#else
#define ASI_M_MMUREGS 0x19
#endif /* CONFIG_SPARC_LEON */
#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */
...
/*
* Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com) Gaisler Research
* Copyright (C) 2004 Stefan Holst (mail@s-holst.de) Uni-Stuttgart
* Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
* Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
*/
#ifndef LEON_H_INCLUDE
#define LEON_H_INCLUDE
#ifdef CONFIG_SPARC_LEON
#define ASI_LEON_NOCACHE 0x01
#define ASI_LEON_DCACHE_MISS 0x1
#define ASI_LEON_CACHEREGS 0x02
#define ASI_LEON_IFLUSH 0x10
#define ASI_LEON_DFLUSH 0x11
#define ASI_LEON_MMUFLUSH 0x18
#define ASI_LEON_MMUREGS 0x19
#define ASI_LEON_BYPASS 0x1c
#define ASI_LEON_FLUSH_PAGE 0x10
/* mmu register access, ASI_LEON_MMUREGS */
#define LEON_CNR_CTRL 0x000
#define LEON_CNR_CTXP 0x100
#define LEON_CNR_CTX 0x200
#define LEON_CNR_F 0x300
#define LEON_CNR_FADDR 0x400
#define LEON_CNR_CTX_NCTX 256 /* number of MMU ctx */
#define LEON_CNR_CTRL_TLBDIS 0x80000000
#define LEON_MMUTLB_ENT_MAX 64
/*
* diagnostic access from mmutlb.vhd:
* 0: pte address
* 4: pte
* 8: additional flags
*/
#define LEON_DIAGF_LVL 0x3
#define LEON_DIAGF_WR 0x8
#define LEON_DIAGF_WR_SHIFT 3
#define LEON_DIAGF_HIT 0x10
#define LEON_DIAGF_HIT_SHIFT 4
#define LEON_DIAGF_CTX 0x1fe0
#define LEON_DIAGF_CTX_SHIFT 5
#define LEON_DIAGF_VALID 0x2000
#define LEON_DIAGF_VALID_SHIFT 13
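/*
 * For example, a hypothetical diagnostic flags word of 0x2013 would decode
 * as a valid entry (LEON_DIAGF_VALID), a hit (LEON_DIAGF_HIT), page table
 * level 3 (LEON_DIAGF_LVL), context 0, with the write bit clear.
 */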
/*
* Interrupt Sources
*
* The interrupt source numbers directly map to the trap type and to
* the bits used in the Interrupt Clear, Interrupt Force, Interrupt Mask,
* and the Interrupt Pending Registers.
*/
#define LEON_INTERRUPT_CORRECTABLE_MEMORY_ERROR 1
#define LEON_INTERRUPT_UART_1_RX_TX 2
#define LEON_INTERRUPT_UART_0_RX_TX 3
#define LEON_INTERRUPT_EXTERNAL_0 4
#define LEON_INTERRUPT_EXTERNAL_1 5
#define LEON_INTERRUPT_EXTERNAL_2 6
#define LEON_INTERRUPT_EXTERNAL_3 7
#define LEON_INTERRUPT_TIMER1 8
#define LEON_INTERRUPT_TIMER2 9
#define LEON_INTERRUPT_EMPTY1 10
#define LEON_INTERRUPT_EMPTY2 11
#define LEON_INTERRUPT_OPEN_ETH 12
#define LEON_INTERRUPT_EMPTY4 13
#define LEON_INTERRUPT_EMPTY5 14
#define LEON_INTERRUPT_EMPTY6 15
/* irq masks */
#define LEON_HARD_INT(x) (1 << (x)) /* irq 0-15 */
#define LEON_IRQMASK_R 0x0000fffe /* bit 15- 1 of lregs.irqmask */
#define LEON_IRQPRIO_R 0xfffe0000 /* bit 31-17 of lregs.irqmask */
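/*
 * For example, interrupt source 8 (LEON_INTERRUPT_TIMER1) arrives as SPARC
 * trap type 0x18 (0x10 + interrupt level 8) and is represented by bit
 * LEON_HARD_INT(8) == 0x100 in the pending/force/clear/mask registers
 * described above.
 */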
/* leon uart register definitions */
#define LEON_OFF_UDATA 0x0
#define LEON_OFF_USTAT 0x4
#define LEON_OFF_UCTRL 0x8
#define LEON_OFF_USCAL 0xc
#define LEON_UCTRL_RE 0x01
#define LEON_UCTRL_TE 0x02
#define LEON_UCTRL_RI 0x04
#define LEON_UCTRL_TI 0x08
#define LEON_UCTRL_PS 0x10
#define LEON_UCTRL_PE 0x20
#define LEON_UCTRL_FL 0x40
#define LEON_UCTRL_LB 0x80
#define LEON_USTAT_DR 0x01
#define LEON_USTAT_TS 0x02
#define LEON_USTAT_TH 0x04
#define LEON_USTAT_BR 0x08
#define LEON_USTAT_OV 0x10
#define LEON_USTAT_PE 0x20
#define LEON_USTAT_FE 0x40
#define LEON_MCFG2_SRAMDIS 0x00002000
#define LEON_MCFG2_SDRAMEN 0x00004000
#define LEON_MCFG2_SRAMBANKSZ 0x00001e00 /* [12-9] */
#define LEON_MCFG2_SRAMBANKSZ_SHIFT 9
#define LEON_MCFG2_SDRAMBANKSZ 0x03800000 /* [25-23] */
#define LEON_MCFG2_SDRAMBANKSZ_SHIFT 23
#define LEON_TCNT0_MASK 0x7fffff
#define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE)
/* no break yet */
#define ASI_LEON3_SYSCTRL 0x02
#define ASI_LEON3_SYSCTRL_ICFG 0x08
#define ASI_LEON3_SYSCTRL_DCFG 0x0c
#define ASI_LEON3_SYSCTRL_CFG_SNOOPING (1 << 27)
#define ASI_LEON3_SYSCTRL_CFG_SSIZE(c) (1 << ((c >> 20) & 0xf))
#ifndef __ASSEMBLY__
/* do a virtual address read without cache */
static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
{
unsigned long retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
return retval;
}
/* do a physical address bypass write, i.e. for 0x80000000 */
static inline void leon_store_reg(unsigned long paddr, unsigned long value)
{
__asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r"(value), "r"(paddr),
"i"(ASI_LEON_BYPASS) : "memory");
}
/* do a physical address bypass load, i.e. for 0x80000000 */
static inline unsigned long leon_load_reg(unsigned long paddr)
{
unsigned long retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r"(retval) : "r"(paddr), "i"(ASI_LEON_BYPASS));
return retval;
}
extern inline void leon_srmmu_disabletlb(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
"i"(ASI_LEON_MMUREGS));
retval |= LEON_CNR_CTRL_TLBDIS;
__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
"i"(ASI_LEON_MMUREGS) : "memory");
}
extern inline void leon_srmmu_enabletlb(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
"i"(ASI_LEON_MMUREGS));
retval = retval & ~LEON_CNR_CTRL_TLBDIS;
__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
"i"(ASI_LEON_MMUREGS) : "memory");
}
/* macro access for leon_load_reg() and leon_store_reg() */
#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x)))
#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
#define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
#define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS)
#define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
#define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
#define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
/* macro access for leon_readnobuffer_reg() */
#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
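/*
 * Usage sketch: a register in the 0x80000000 on-chip register area can be
 * read uncached and untranslated with something like
 *   val = LEON_BYPASS_LOAD_PA(0x80000000);
 * and written back with LEON_BYPASS_STORE_PA(0x80000000, val).
 */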
extern void sparc_leon_eirq_register(int eirq);
extern void leon_init(void);
extern void leon_switch_mm(void);
extern void leon_init_IRQ(void);
extern unsigned long last_valid_pfn;
extern inline unsigned long sparc_leon3_get_dcachecfg(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r"(retval) :
"r"(ASI_LEON3_SYSCTRL_DCFG),
"i"(ASI_LEON3_SYSCTRL));
return retval;
}
/* enable snooping */
extern inline void sparc_leon3_enable_snooping(void)
{
__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
"set 0x800000, %%l2\n\t"
"or %%l2, %%l1, %%l2\n\t"
"sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
};
extern inline void sparc_leon3_disable_cache(void)
{
__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
"set 0x00000f, %%l2\n\t"
"andn %%l2, %%l1, %%l2\n\t"
"sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
};
#endif /*!__ASSEMBLY__*/
#ifdef CONFIG_SMP
# define LEON3_IRQ_RESCHEDULE 13
# define LEON3_IRQ_TICKER (leon_percpu_timer_dev[0].irq)
# define LEON3_IRQ_CROSS_CALL 15
#endif
#if defined(PAGE_SIZE_LEON_8K)
#define LEON_PAGE_SIZE_LEON 1
#elif defined(PAGE_SIZE_LEON_16K)
#define LEON_PAGE_SIZE_LEON 2
#else
#define LEON_PAGE_SIZE_LEON 0
#endif
#if LEON_PAGE_SIZE_LEON == 0
/* [ 8, 6, 6 ] + 12 */
#define LEON_PGD_SH 24
#define LEON_PGD_M 0xff
#define LEON_PMD_SH 18
#define LEON_PMD_SH_V (LEON_PGD_SH-2)
#define LEON_PMD_M 0x3f
#define LEON_PTE_SH 12
#define LEON_PTE_M 0x3f
#elif LEON_PAGE_SIZE_LEON == 1
/* [ 7, 6, 6 ] + 13 */
#define LEON_PGD_SH 25
#define LEON_PGD_M 0x7f
#define LEON_PMD_SH 19
#define LEON_PMD_SH_V (LEON_PGD_SH-1)
#define LEON_PMD_M 0x3f
#define LEON_PTE_SH 13
#define LEON_PTE_M 0x3f
#elif LEON_PAGE_SIZE_LEON == 2
/* [ 6, 6, 6 ] + 14 */
#define LEON_PGD_SH 26
#define LEON_PGD_M 0x3f
#define LEON_PMD_SH 20
#define LEON_PMD_SH_V (LEON_PGD_SH-0)
#define LEON_PMD_M 0x3f
#define LEON_PTE_SH 14
#define LEON_PTE_M 0x3f
#elif LEON_PAGE_SIZE_LEON == 3
/* [ 4, 7, 6 ] + 15 */
#define LEON_PGD_SH 28
#define LEON_PGD_M 0x0f
#define LEON_PMD_SH 21
#define LEON_PMD_SH_V (LEON_PGD_SH-0)
#define LEON_PMD_M 0x7f
#define LEON_PTE_SH 15
#define LEON_PTE_M 0x3f
#else
#error cannot determine LEON_PAGE_SIZE_LEON
#endif
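/*
 * Worked example, assuming the default LEON_PAGE_SIZE_LEON == 0 case above:
 * a 32-bit virtual address splits as [ 8 | 6 | 6 ] + 12 bits, i.e.
 *   pgd index = (vaddr >> LEON_PGD_SH) & LEON_PGD_M  ->  (vaddr >> 24) & 0xff
 *   pmd index = (vaddr >> LEON_PMD_SH) & LEON_PMD_M  ->  (vaddr >> 18) & 0x3f
 *   pte index = (vaddr >> LEON_PTE_SH) & LEON_PTE_M  ->  (vaddr >> 12) & 0x3f
 * with the remaining 12 bits forming the offset into a 4K page.
 */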
#define PAGE_MIN_SHIFT (12)
#define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT)
#define LEON3_XCCR_SETS_MASK 0x07000000UL
#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
#define LEON2_CCR_DSETS_MASK 0x03000000UL
#define LEON2_CFG_SSIZE_MASK 0x00007000UL
#ifndef __ASSEMBLY__
extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
extern void leon_flush_cache_all(void);
extern void leon_flush_tlb_all(void);
extern int leon_flush_during_switch;
extern int leon_flush_needed(void);
struct vm_area_struct;
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
extern void leon_flush_cache_all(void);
extern void leon_flush_tlb_all(void);
extern int leon_flush_during_switch;
extern int leon_flush_needed(void);
extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
/* struct that holds the LEON3 cache configuration registers */
struct leon3_cacheregs {
unsigned long ccr; /* 0x00 - Cache Control Register */
unsigned long iccr; /* 0x08 - Instruction Cache Configuration Register */
unsigned long dccr; /* 0x0c - Data Cache Configuration Register */
};
/* struct that holds the LEON2 cache control register
* & configuration register
*/
struct leon2_cacheregs {
unsigned long ccr, cfg;
};
#ifdef __KERNEL__
#include <linux/interrupt.h>
struct device_node;
extern int sparc_leon_eirq_get(int eirq, int cpu);
extern irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id);
extern void sparc_leon_eirq_register(int eirq);
extern void leon_clear_clock_irq(void);
extern void leon_load_profile_irq(int cpu, unsigned int limit);
extern void leon_init_timers(irq_handler_t counter_fn);
extern void leon_clear_clock_irq(void);
extern void leon_load_profile_irq(int cpu, unsigned int limit);
extern void leon_trans_init(struct device_node *dp);
extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
extern void leon_init_IRQ(void);
extern void leon_init(void);
extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void init_leon(void);
extern void poke_leonsparc(void);
extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
extern int leon_flush_needed(void);
extern void leon_switch_mm(void);
extern int srmmu_swprobe_trace;
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* macros used in leon_mm.c */
#define PFN(x) ((x) >> PAGE_SHIFT)
#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
#define _SRMMU_PTE_PMASK_LEON 0xffffffff
#else /* defined(CONFIG_SPARC_LEON) */
/* nop definitions for !LEON case */
#define leon_init() do {} while (0)
#define leon_switch_mm() do {} while (0)
#define leon_init_IRQ() do {} while (0)
#define init_leon() do {} while (0)
#endif /* !defined(CONFIG_SPARC_LEON) */
#endif
/*
*Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com), Gaisler Research
*Copyright (C) 2004 Stefan Holst (mail@s-holst.de), Uni-Stuttgart
*Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com),Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
*/
#ifndef LEON_AMBA_H_INCLUDE
#define LEON_AMBA_H_INCLUDE
#ifndef __ASSEMBLY__
struct amba_prom_registers {
unsigned int phys_addr; /* The physical address of this register */
unsigned int reg_size; /* How many bytes does this register take up? */
};
#endif
/*
* The following defines the bits in the LEON UART Status Registers.
*/
#define LEON_REG_UART_STATUS_DR 0x00000001 /* Data Ready */
#define LEON_REG_UART_STATUS_TSE 0x00000002 /* TX Send Register Empty */
#define LEON_REG_UART_STATUS_THE 0x00000004 /* TX Hold Register Empty */
#define LEON_REG_UART_STATUS_BR 0x00000008 /* Break Error */
#define LEON_REG_UART_STATUS_OE 0x00000010 /* RX Overrun Error */
#define LEON_REG_UART_STATUS_PE 0x00000020 /* RX Parity Error */
#define LEON_REG_UART_STATUS_FE 0x00000040 /* RX Framing Error */
#define LEON_REG_UART_STATUS_ERR 0x00000078 /* Error Mask */
/*
* The following defines the bits in the LEON UART Ctrl Registers.
*/
#define LEON_REG_UART_CTRL_RE 0x00000001 /* Receiver enable */
#define LEON_REG_UART_CTRL_TE 0x00000002 /* Transmitter enable */
#define LEON_REG_UART_CTRL_RI 0x00000004 /* Receiver interrupt enable */
#define LEON_REG_UART_CTRL_TI 0x00000008 /* Transmitter irq */
#define LEON_REG_UART_CTRL_PS 0x00000010 /* Parity select */
#define LEON_REG_UART_CTRL_PE 0x00000020 /* Parity enable */
#define LEON_REG_UART_CTRL_FL 0x00000040 /* Flow control enable */
#define LEON_REG_UART_CTRL_LB 0x00000080 /* Loop Back enable */
#define LEON3_GPTIMER_EN 1
#define LEON3_GPTIMER_RL 2
#define LEON3_GPTIMER_LD 4
#define LEON3_GPTIMER_IRQEN 8
#define LEON3_GPTIMER_SEPIRQ 8
#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
/* 0 = hold scalar and counter */
#define LEON23_REG_TIMER_CONTROL_RL 0x00000002 /* 1 = reload at 0 */
/* 0 = stop at 0 */
#define LEON23_REG_TIMER_CONTROL_LD 0x00000004 /* 1 = load counter */
/* 0 = no function */
#define LEON23_REG_TIMER_CONTROL_IQ 0x00000008 /* 1 = irq enable */
/* 0 = no function */
/*
* The following defines the bits in the LEON PS/2 Status Registers.
*/
#define LEON_REG_PS2_STATUS_DR 0x00000001 /* Data Ready */
#define LEON_REG_PS2_STATUS_PE 0x00000002 /* Parity error */
#define LEON_REG_PS2_STATUS_FE 0x00000004 /* Framing error */
#define LEON_REG_PS2_STATUS_KI 0x00000008 /* Keyboard inhibit */
#define LEON_REG_PS2_STATUS_RF 0x00000010 /* RX buffer full */
#define LEON_REG_PS2_STATUS_TF 0x00000020 /* TX buffer full */
/*
* The following defines the bits in the LEON PS/2 Ctrl Registers.
*/
#define LEON_REG_PS2_CTRL_RE 0x00000001 /* Receiver enable */
#define LEON_REG_PS2_CTRL_TE 0x00000002 /* Transmitter enable */
#define LEON_REG_PS2_CTRL_RI 0x00000004 /* Keyboard receive irq */
#define LEON_REG_PS2_CTRL_TI 0x00000008 /* Keyboard transmit irq */
#define LEON3_IRQMPSTATUS_CPUNR 28
#define LEON3_IRQMPSTATUS_BROADCAST 27
#define GPTIMER_CONFIG_IRQNT(a) (((a) >> 3) & 0x1f)
#define GPTIMER_CONFIG_ISSEP(a) ((a) & (1 << 8))
#define GPTIMER_CONFIG_NTIMERS(a) ((a) & (0x7))
#define LEON3_GPTIMER_CTRL_PENDING 0x10
#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
#ifdef CONFIG_SPARC_LEON
#ifndef __ASSEMBLY__
struct leon3_irqctrl_regs_map {
u32 ilevel;
u32 ipend;
u32 iforce;
u32 iclear;
u32 mpstatus;
u32 mpbroadcast;
u32 notused02;
u32 notused03;
u32 notused10;
u32 notused11;
u32 notused12;
u32 notused13;
u32 notused20;
u32 notused21;
u32 notused22;
u32 notused23;
u32 mask[16];
u32 force[16];
/* Extended IRQ registers */
u32 intid[16]; /* 0xc0 */
};
struct leon3_apbuart_regs_map {
u32 data;
u32 status;
u32 ctrl;
u32 scaler;
};
struct leon3_gptimerelem_regs_map {
u32 val;
u32 rld;
u32 ctrl;
u32 unused;
};
struct leon3_gptimer_regs_map {
u32 scalar;
u32 scalar_reload;
u32 config;
u32 unused;
struct leon3_gptimerelem_regs_map e[8];
};
/*
* Types and structure used for AMBA Plug & Play bus scanning
*/
#define AMBA_MAXAPB_DEVS 64
#define AMBA_MAXAPB_DEVS_PERBUS 16
struct amba_device_table {
int devnr; /* number of devices on AHB or APB bus */
unsigned int *addr[16]; /* addresses to the devices configuration tables */
unsigned int allocbits[1]; /* 0=unallocated, 1=allocated driver */
};
struct amba_apbslv_device_table {
int devnr; /* number of devices on AHB or APB bus */
unsigned int *addr[AMBA_MAXAPB_DEVS]; /* addresses to the devices configuration tables */
unsigned int apbmst[AMBA_MAXAPB_DEVS]; /* APB master if an entry is an APB slave */
unsigned int apbmstidx[AMBA_MAXAPB_DEVS]; /* APB master idx if an entry is an APB slave */
unsigned int allocbits[4]; /* 0=unallocated, 1=allocated driver */
};
struct amba_confarea_type {
struct amba_confarea_type *next;/* next bus in chain */
struct amba_device_table ahbmst;
struct amba_device_table ahbslv;
struct amba_apbslv_device_table apbslv;
unsigned int apbmst;
};
/* collect apb slaves */
struct amba_apb_device {
unsigned int start, irq, bus_id;
struct amba_confarea_type *bus;
};
/* collect ahb slaves */
struct amba_ahb_device {
unsigned int start[4], irq, bus_id;
struct amba_confarea_type *bus;
};
struct device_node;
void _amba_init(struct device_node *dp, struct device_node ***nextp);
extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs;
extern struct leon3_gptimer_regs_map *leon3_gptimer_regs;
extern struct amba_apb_device leon_percpu_timer_dev[16];
extern int leondebug_irq_disable;
extern int leon_debug_irqout;
extern unsigned long leon3_gptimer_irq;
extern unsigned int sparc_leon_eirq;
#endif /* __ASSEMBLY__ */
#define LEON3_IO_AREA 0xfff00000
#define LEON3_CONF_AREA 0xff000
#define LEON3_AHB_SLAVE_CONF_AREA (1 << 11)
#define LEON3_AHB_CONF_WORDS 8
#define LEON3_APB_CONF_WORDS 2
#define LEON3_AHB_MASTERS 16
#define LEON3_AHB_SLAVES 16
#define LEON3_APB_SLAVES 16
#define LEON3_APBUARTS 8
/* Vendor codes */
#define VENDOR_GAISLER 1
#define VENDOR_PENDER 2
#define VENDOR_ESA 4
#define VENDOR_OPENCORES 8
/* Gaisler Research device id's */
#define GAISLER_LEON3 0x003
#define GAISLER_LEON3DSU 0x004
#define GAISLER_ETHAHB 0x005
#define GAISLER_APBMST 0x006
#define GAISLER_AHBUART 0x007
#define GAISLER_SRCTRL 0x008
#define GAISLER_SDCTRL 0x009
#define GAISLER_APBUART 0x00C
#define GAISLER_IRQMP 0x00D
#define GAISLER_AHBRAM 0x00E
#define GAISLER_GPTIMER 0x011
#define GAISLER_PCITRG 0x012
#define GAISLER_PCISBRG 0x013
#define GAISLER_PCIFBRG 0x014
#define GAISLER_PCITRACE 0x015
#define GAISLER_PCIDMA 0x016
#define GAISLER_AHBTRACE 0x017
#define GAISLER_ETHDSU 0x018
#define GAISLER_PIOPORT 0x01A
#define GAISLER_GRGPIO 0x01A
#define GAISLER_AHBJTAG 0x01c
#define GAISLER_ETHMAC 0x01D
#define GAISLER_AHB2AHB 0x020
#define GAISLER_USBDC 0x021
#define GAISLER_ATACTRL 0x024
#define GAISLER_DDRSPA 0x025
#define GAISLER_USBEHC 0x026
#define GAISLER_USBUHC 0x027
#define GAISLER_I2CMST 0x028
#define GAISLER_SPICTRL 0x02D
#define GAISLER_DDR2SPA 0x02E
#define GAISLER_SPIMCTRL 0x045
#define GAISLER_LEON4 0x048
#define GAISLER_LEON4DSU 0x049
#define GAISLER_AHBSTAT 0x052
#define GAISLER_FTMCTRL 0x054
#define GAISLER_KBD 0x060
#define GAISLER_VGA 0x061
#define GAISLER_SVGA 0x063
#define GAISLER_GRSYSMON 0x066
#define GAISLER_GRACECTRL 0x067
#define GAISLER_L2TIME 0xffd /* internal device: leon2 timer */
#define GAISLER_L2C 0xffe /* internal device: leon2compat */
#define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play configarea */
#define amba_vendor(x) (((x) >> 24) & 0xff)
#define amba_device(x) (((x) >> 12) & 0xfff)
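/*
 * The plug&play identification word keeps the vendor in bits 31:24 and the
 * device id in bits 23:12, so a hypothetical word of 0x0100c000 decodes as
 * amba_vendor() == VENDOR_GAISLER and amba_device() == GAISLER_APBUART.
 */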
#endif /* !defined(CONFIG_SPARC_LEON) */
#endif
@@ -15,7 +15,7 @@ struct Sun_Machine_Models {
/* Current number of machines we know about that has an IDPROM
* machtype entry including one entry for the 0x80 OBP machines.
*/
#define NUM_SUN_MACHINES 16
/* The machine type in the idprom area looks like this:
*
@@ -30,6 +30,7 @@ struct Sun_Machine_Models {
#define SM_ARCH_MASK 0xf0
#define SM_SUN4 0x20
#define M_LEON 0x30
#define SM_SUN4C 0x50
#define SM_SUN4M 0x70
#define SM_SUN4M_OBP 0x80
@@ -41,6 +42,9 @@ struct Sun_Machine_Models {
#define SM_4_330 0x03 /* Sun 4/300 series */
#define SM_4_470 0x04 /* Sun 4/400 series */
/* Leon machines */
#define M_LEON3_SOC 0x02 /* Leon3 SoC */
/* Sun4c machines Full Name - PROM NAME */
#define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */
#define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */
...
@@ -5,6 +5,9 @@ extern int __init nmi_init(void);
extern void perfctr_irq(int irq, struct pt_regs *regs);
extern void nmi_adjust_hz(unsigned int new_hz);
extern atomic_t nmi_active;
extern void start_nmi_watchdog(void *unused);
extern void stop_nmi_watchdog(void *unused);
#endif /* __NMI_H */
#ifndef __ASM_SPARC_PERF_COUNTER_H
#define __ASM_SPARC_PERF_COUNTER_H
extern void set_perf_counter_pending(void);
#define PERF_COUNTER_INDEX_OFFSET 0
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
#else
static inline void init_hw_perf_counters(void) { }
#endif
#endif
@@ -267,6 +267,7 @@ static inline void srmmu_flush_tlb_page(unsigned long page)
}
#ifndef CONFIG_SPARC_LEON
static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
{
unsigned long retval;
@@ -278,6 +279,9 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
return retval;
}
#else
#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK)
#endif
static inline int
srmmu_get_pte (unsigned long addr)
...
@@ -118,5 +118,8 @@ extern struct device_node *of_console_device;
extern char *of_console_path;
extern char *of_console_options;
extern void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
extern char *build_full_name(struct device_node *dp);
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */
@@ -32,6 +32,7 @@ enum sparc_cpu {
sun4u = 0x05, /* V8 ploos ploos */
sun_unknown = 0x06,
ap1000 = 0x07, /* almost a sun4m */
sparc_leon = 0x08, /* Leon SoC */
};
/* Really, userland should not be looking at any of this... */
...
@@ -29,6 +29,10 @@ enum sparc_cpu {
/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0
extern const char *sparc_cpu_type;
extern const char *sparc_fpu_type;
extern const char *sparc_pmu_type;
extern char reboot_command[];
/* These are here in an effort to more fully work around Spitfire Errata
...
@@ -8,9 +8,8 @@
* need to be careful to avoid a name clashes.
*/
#if defined(__sparc__)
/*** SPARC 64 bit ***/
#include <asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
@@ -26,33 +25,21 @@ typedef unsigned short umode_t;
/* Dma addresses come in generic and 64-bit flavours. */
typedef u32 dma_addr_t;
typedef u64 dma64_addr_t;
#if defined(__arch64__)
/*** SPARC 64 bit ***/
typedef u64 dma64_addr_t;
#else
/*** SPARC 32 bit ***/
#include <asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef u32 dma_addr_t;
typedef u32 dma64_addr_t;
#endif /* defined(__arch64__) */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* defined(__sparc__) */
#endif /* defined(_SPARC_TYPES_H) */
@@ -7,8 +7,8 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
...
@@ -395,8 +395,9 @@
#define __NR_preadv 324
#define __NR_pwritev 325
#define __NR_rt_tgsigqueueinfo 326
#define __NR_perf_counter_open 327
#define NR_SYSCALLS 328
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
...
@@ -41,6 +41,8 @@ obj-y += of_device_common.o
obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
obj-$(CONFIG_SPARC64) += iommu.o
@@ -101,3 +103,6 @@ obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
obj-$(CONFIG_AUDIT) += audit.o
audit--$(CONFIG_AUDIT) := compat_audit.o
obj-$(CONFIG_COMPAT) += $(audit--y)
pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o
obj-$(CONFIG_SPARC64) += $(pc--y)
@@ -312,7 +312,12 @@ void __cpuinit cpu_probe(void)
psr = get_psr();
put_psr(psr | PSR_EF);
#ifdef CONFIG_SPARC_LEON
fpu_vers = 7;
#else
fpu_vers = ((get_fsr() >> 17) & 0x7);
#endif
put_psr(psr);
set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
...
@@ -809,6 +809,11 @@ found_version:
nop
got_prop:
#ifdef CONFIG_SPARC_LEON
/* no cpu-type check is needed, it is a SPARC-LEON */
ba sun4c_continue_boot
nop
#endif
set cputypval, %o2
ldub [%o2 + 0x4], %l1
...
@@ -31,6 +31,8 @@ static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) },
{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) },
{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) },
/* Now Leon */
{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) },
/* Now, Sun4c's */
{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) },
{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) },
...
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/pci.h> /* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>
@@ -683,26 +684,33 @@ EXPORT_SYMBOL(dma_set_mask);
#ifdef CONFIG_PROC_FS
static int sparc_io_proc_show(struct seq_file *m, void *v)
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
void *data)
{
struct resource *root = m->private, *r;
struct resource *r;
const char *nm;
for (r = root->child; r != NULL; r = r->sibling) {
if (p + 32 >= e) /* Better than nothing */
break;
if ((nm = r->name) == 0) nm = "???";
seq_printf(m, "%016llx-%016llx: %s\n",
(unsigned long long)r->start,
(unsigned long long)r->end, nm);
}
return 0;
}
static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, sparc_io_proc_show, PDE(inode)->data);
}
static const struct file_operations sparc_io_proc_fops = {
.owner = THIS_MODULE,
.open = sparc_io_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_FS */
/*
@@ -727,7 +735,7 @@ static struct resource *_sparc_find_resource(struct resource *root,
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}
@@ -45,6 +45,7 @@
#include <asm/pcic.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include <asm/leon.h>
#include "kernel.h" #include "kernel.h"
#include "irq.h" #include "irq.h"
...@@ -661,6 +662,10 @@ void __init init_IRQ(void) ...@@ -661,6 +662,10 @@ void __init init_IRQ(void)
sun4d_init_IRQ(); sun4d_init_IRQ();
break; break;
case sparc_leon:
leon_init_IRQ();
break;
default:
prom_printf("Cannot initialize IRQs on this Sun machine...");
break;
...
/*
* Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
* Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/prom.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "prom.h"
#include "irq.h"
struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */
struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */
struct amba_apb_device leon_percpu_timer_dev[16];
int leondebug_irq_disable;
int leon_debug_irqout;
static int dummy_master_l10_counter;
unsigned long leon3_gptimer_irq; /* interrupt controller irq number, initialized by amba_init() */
unsigned int sparc_leon_eirq;
#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0]))
/* Return the IRQ of the pending IRQ on the extended IRQ controller */
int sparc_leon_eirq_get(int eirq, int cpu)
{
return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
}
irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id)
{
printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n");
return IRQ_HANDLED;
}
/* The extended IRQ controller has been found, this function registers it */
void sparc_leon_eirq_register(int eirq)
{
int irq;
/* Register a "BAD" handler for this interrupt, it should never happen */
irq = request_irq(eirq, sparc_leon_eirq_isr,
(IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL);
if (irq) {
printk(KERN_ERR
"sparc_leon_eirq_register: unable to attach IRQ%d\n",
eirq);
} else {
sparc_leon_eirq = eirq;
}
}
static inline unsigned long get_irqmask(unsigned int irq)
{
unsigned long mask;
if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
|| ((irq > 0x1f) && sparc_leon_eirq)) {
printk(KERN_ERR
"leon_get_irqmask: false irq number: %d\n", irq);
mask = 0;
} else {
mask = LEON_HARD_INT(irq);
}
return mask;
}
static void leon_enable_irq(unsigned int irq_nr)
{
unsigned long mask, flags;
mask = get_irqmask(irq_nr);
local_irq_save(flags);
LEON3_BYPASS_STORE_PA(LEON_IMASK,
(LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask)));
local_irq_restore(flags);
}
static void leon_disable_irq(unsigned int irq_nr)
{
unsigned long mask, flags;
mask = get_irqmask(irq_nr);
local_irq_save(flags);
LEON3_BYPASS_STORE_PA(LEON_IMASK,
(LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask)));
local_irq_restore(flags);
}
void __init leon_init_timers(irq_handler_t counter_fn)
{
int irq;
leondebug_irq_disable = 0;
leon_debug_irqout = 0;
master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
dummy_master_l10_counter = 0;
if (leon3_gptimer_regs && leon3_irqctrl_regs) {
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0);
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld,
(((1000000 / 100) - 1)));
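/* Reload value (1000000 / 100) - 1 = 9999: assuming the GPTIMER
 * prescaler is set up to tick at 1 MHz, this produces the 100 Hz
 * system timer interrupt rate expected by the sparc32 time code. */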
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0);
} else {
printk(KERN_ERR "No Timer/irqctrl found\n");
BUG();
}
irq = request_irq(leon3_gptimer_irq,
counter_fn,
(IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
if (irq) {
printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n",
LEON_INTERRUPT_TIMER1);
prom_halt();
}
if (leon3_gptimer_regs) {
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl,
LEON3_GPTIMER_EN |
LEON3_GPTIMER_RL |
LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN);
}
}
void leon_clear_clock_irq(void)
{
}
void leon_load_profile_irq(int cpu, unsigned int limit)
{
BUG();
}
void __init leon_trans_init(struct device_node *dp)
{
if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
struct property *p;
p = of_find_property(dp, "mid", (void *)0);
if (p) {
int mid;
dp->name = prom_early_alloc(5 + 1);
memcpy(&mid, p->value, p->length);
sprintf((char *)dp->name, "cpu%.2d", mid);
}
}
}
void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0;
void __init leon_node_init(struct device_node *dp, struct device_node ***nextp)
{
if (prom_amba_init &&
strcmp(dp->type, "ambapp") == 0 &&
strcmp(dp->name, "ambapp0") == 0) {
prom_amba_init(dp, nextp);
}
}
void __init leon_init_IRQ(void)
{
sparc_init_timers = leon_init_timers;
BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq,
BTFIXUPCALL_NOP);
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM);
#endif
}
void __init leon_init(void)
{
prom_build_more = &leon_node_init;
}
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/perf_counter.h>
#include <asm/ptrace.h>
#include <asm/local.h>
#include <asm/pcr.h>
@@ -31,13 +32,19 @@
* level 14 as our IRQ off level.
*/
static int nmi_watchdog_active;
static int panic_on_timeout;
/* nmi_active:
* >0: the NMI watchdog is active, but can be disabled
* <0: the NMI watchdog has not been set up, and cannot be enabled
* 0: the NMI watchdog is disabled, but can be enabled
*/
atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
@@ -45,7 +52,7 @@ static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
{
if (atomic_read(&nmi_active)) {
int cpu;
for_each_present_cpu(cpu) {
@@ -78,6 +85,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
if (do_panic || panic_on_oops)
panic("Non maskable interrupt");
nmi_exit();
local_irq_enable();
do_exit(SIGBUS);
}
@@ -92,6 +100,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
local_cpu_data().__nmi_count++;
nmi_enter();
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
@@ -110,10 +120,12 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
__get_cpu_var(last_irq_sum) = sum;
local_set(&__get_cpu_var(alert_counter), 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
pcr_ops->write(pcr_enable);
}
nmi_exit();
}
static inline unsigned int get_nmi_count(int cpu)
@@ -121,8 +133,6 @@ static inline unsigned int get_nmi_count(int cpu)
return cpu_data(cpu).__nmi_count;
}
static int endflag __initdata;
static __init void nmi_cpu_busy(void *data)
{
local_irq_enable_in_hardirq();
@@ -143,12 +153,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
printk(KERN_WARNING
"and attach the output of the 'dmesg' command.\n");
per_cpu(wd_enabled, cpu) = 0;
atomic_dec(&nmi_active);
}
void stop_nmi_watchdog(void *unused)
{
pcr_ops->write(PCR_PIC_PRIV);
__get_cpu_var(wd_enabled) = 0;
atomic_dec(&nmi_active);
}
static int __init check_nmi_watchdog(void)
@@ -156,6 +169,9 @@ static int __init check_nmi_watchdog(void)
unsigned int *prev_nmi_count;
int cpu, err;
if (!atomic_read(&nmi_active))
return 0;
prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
if (!prev_nmi_count) {
err = -ENOMEM;
@@ -172,12 +188,15 @@ static int __init check_nmi_watchdog(void)
mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
for_each_online_cpu(cpu) {
if (!per_cpu(wd_enabled, cpu))
continue;
if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
report_broken_nmi(cpu, prev_nmi_count);
}
endflag = 1;
if (!atomic_read(&nmi_active)) {
kfree(prev_nmi_count);
atomic_set(&nmi_active, -1);
err = -ENODEV;
goto error;
}
@@ -188,12 +207,26 @@ static int __init check_nmi_watchdog(void)
kfree(prev_nmi_count);
return 0;
error:
on_each_cpu(stop_nmi_watchdog, NULL, 1);
return err;
}
void start_nmi_watchdog(void *unused)
{
__get_cpu_var(wd_enabled) = 1;
atomic_inc(&nmi_active);
pcr_ops->write(PCR_PIC_PRIV);
write_pic(picl_value(nmi_hz));
pcr_ops->write(pcr_enable);
}
static void nmi_adjust_hz_one(void *unused)
{
if (!__get_cpu_var(wd_enabled))
return;
pcr_ops->write(PCR_PIC_PRIV);
write_pic(picl_value(nmi_hz));
@@ -203,13 +236,13 @@ static void start_watchdog(void *unused)
void nmi_adjust_hz(unsigned int new_hz)
{
nmi_hz = new_hz;
on_each_cpu(nmi_adjust_hz_one, NULL, 1);
}
EXPORT_SYMBOL_GPL(nmi_adjust_hz);
static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
{
on_each_cpu(stop_nmi_watchdog, NULL, 1);
return 0;
}
@@ -221,18 +254,19 @@ int __init nmi_init(void)
{
int err;
on_each_cpu(start_nmi_watchdog, NULL, 1);
err = check_nmi_watchdog();
if (!err) {
err = register_reboot_notifier(&nmi_reboot_notifier);
if (err) {
on_each_cpu(stop_nmi_watchdog, NULL, 1);
atomic_set(&nmi_active, -1);
}
}
if (!err)
init_hw_perf_counters();
return err;
}
...
@@ -9,6 +9,8 @@
#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "of_device_common.h" #include "of_device_common.h"
...@@ -97,6 +99,35 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) ...@@ -97,6 +99,35 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
return IORESOURCE_MEM; return IORESOURCE_MEM;
} }
/*
* AMBAPP bus specific translator
*/
static int of_bus_ambapp_match(struct device_node *np)
{
return !strcmp(np->name, "ambapp");
}
static void of_bus_ambapp_count_cells(struct device_node *child,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 1;
if (sizec)
*sizec = 1;
}
static int of_bus_ambapp_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
return of_bus_default_map(addr, range, na, ns, pna);
}
static unsigned long of_bus_ambapp_get_flags(const u32 *addr,
unsigned long flags)
{
return IORESOURCE_MEM;
}
/*
* Array of bus specific translators
@@ -121,6 +152,15 @@ static struct of_bus of_busses[] = {
.map = of_bus_default_map,
.get_flags = of_bus_sbus_get_flags,
},
/* AMBA */
{
.name = "ambapp",
.addr_prop_name = "reg",
.match = of_bus_ambapp_match,
.count_cells = of_bus_ambapp_count_cells,
.map = of_bus_ambapp_map,
.get_flags = of_bus_ambapp_get_flags,
},
/* Default */
{
.name = "default",
...
@@ -7,6 +7,8 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/perf_counter.h>
#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
@@ -34,10 +36,20 @@ unsigned int picl_shift;
*/
void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs;
clear_softint(1 << PIL_DEFERRED_PCR_WORK);
old_regs = set_irq_regs(regs);
irq_enter();
#ifdef CONFIG_PERF_COUNTERS
perf_counter_do_pending();
#endif
irq_exit();
set_irq_regs(old_regs);
}
void set_perf_counter_pending(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
...
/* Performance counter support for sparc64.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*
* This code is based almost entirely upon the x86 perf counter
* code, which is:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
#include <linux/perf_counter.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
/* Sparc64 chips have two performance counters, 32-bits each, with
* overflow interrupts generated on transition from 0xffffffff to 0.
* The counters are accessed in one go using a 64-bit register.
*
* Both counters are controlled using a single control register. The
* only way to stop all sampling is to clear all of the context (user,
* supervisor, hypervisor) sampling enable bits. But these bits apply
* to both counters, thus the two counters can't be enabled/disabled
* individually.
*
* The control register has two event fields, one for each of the two
* counters. It's thus nearly impossible to have one counter going
* while keeping the other one stopped. Therefore it is possible to
* get overflow interrupts for counters not currently "in use" and
* that condition must be checked in the overflow interrupt handler.
*
* So we use a hack, in that we program inactive counters with the
* "sw_count0" and "sw_count1" events. These count how many times
* the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
* unusual way to encode a NOP and therefore will not trigger in
* normal code.
*/
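/* As a rough illustration of the encoding described above, using the
 * ultra3i parameters defined below (upper_shift = 11, lower_shift = 4),
 * counting CPU cycles (event 0x00) on the upper counter while parking the
 * lower counter on its nop event (0x14) would give a control register
 * value along the lines of
 *
 *	pcr = (0x00 << 11) | (0x14 << 4) | <context trace enable bits>;
 *
 * The real composition is done by event_encoding(), nop_for_index() and
 * sparc_pmu_enable_counter() further down; this is only a sketch.
 */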
#define MAX_HWCOUNTERS 2
#define MAX_PERIOD ((1UL << 32) - 1)
#define PIC_UPPER_INDEX 0
#define PIC_LOWER_INDEX 1
struct cpu_hw_counters {
struct perf_counter *counters[MAX_HWCOUNTERS];
unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
int enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };
struct perf_event_map {
u16 encoding;
u8 pic_mask;
#define PIC_NONE 0x00
#define PIC_UPPER 0x01
#define PIC_LOWER 0x02
};
struct sparc_pmu {
const struct perf_event_map *(*event_map)(int);
int max_events;
int upper_shift;
int lower_shift;
int event_mask;
int hv_bit;
int irq_bit;
int upper_nop;
int lower_nop;
};
static const struct perf_event_map ultra3i_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
static const struct perf_event_map *ultra3i_event_map(int event)
{
return &ultra3i_perfmon_event_map[event];
}
static const struct sparc_pmu ultra3i_pmu = {
.event_map = ultra3i_event_map,
.max_events = ARRAY_SIZE(ultra3i_perfmon_event_map),
.upper_shift = 11,
.lower_shift = 4,
.event_mask = 0x3f,
.upper_nop = 0x1c,
.lower_nop = 0x14,
};
static const struct perf_event_map niagara2_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
static const struct perf_event_map *niagara2_event_map(int event)
{
return &niagara2_perfmon_event_map[event];
}
static const struct sparc_pmu niagara2_pmu = {
.event_map = niagara2_event_map,
.max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
.upper_shift = 19,
.lower_shift = 6,
.event_mask = 0xfff,
.hv_bit = 0x8,
.irq_bit = 0x03,
.upper_nop = 0x220,
.lower_nop = 0x220,
};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event, int idx)
{
if (idx == PIC_UPPER_INDEX)
event <<= sparc_pmu->upper_shift;
else
event <<= sparc_pmu->lower_shift;
return event;
}
static u64 mask_for_index(int idx)
{
return event_encoding(sparc_pmu->event_mask, idx);
}
static u64 nop_for_index(int idx)
{
return event_encoding(idx == PIC_UPPER_INDEX ?
sparc_pmu->upper_nop :
sparc_pmu->lower_nop, idx);
}
static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
int idx)
{
u64 val, mask = mask_for_index(idx);
val = pcr_ops->read();
pcr_ops->write((val & ~mask) | hwc->config);
}
static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
int idx)
{
u64 mask = mask_for_index(idx);
u64 nop = nop_for_index(idx);
u64 val = pcr_ops->read();
pcr_ops->write((val & ~mask) | nop);
}
void hw_perf_enable(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
u64 val;
int i;
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
val = pcr_ops->read();
for (i = 0; i < MAX_HWCOUNTERS; i++) {
struct perf_counter *cp = cpuc->counters[i];
struct hw_perf_counter *hwc;
if (!cp)
continue;
hwc = &cp->hw;
val |= hwc->config_base;
}
pcr_ops->write(val);
}
void hw_perf_disable(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
u64 val;
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
val = pcr_ops->read();
val &= ~(PCR_UTRACE | PCR_STRACE |
sparc_pmu->hv_bit | sparc_pmu->irq_bit);
pcr_ops->write(val);
}
static u32 read_pmc(int idx)
{
u64 val;
read_pic(val);
if (idx == PIC_UPPER_INDEX)
val >>= 32;
return val & 0xffffffff;
}
static void write_pmc(int idx, u64 val)
{
u64 shift, mask, pic;
shift = 0;
if (idx == PIC_UPPER_INDEX)
shift = 32;
mask = ((u64) 0xffffffff) << shift;
val <<= shift;
read_pic(pic);
pic &= ~mask;
pic |= val;
write_pic(pic);
}
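
read_pmc() and write_pmc() operate on the single 64-bit %pic register that holds both 32-bit counters, selecting one half by shift and mask. A standalone sketch (not part of the patch) of that packing:

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: pretend 'pic' is the 64-bit register; the upper
 * counter occupies bits 63:32, the lower counter bits 31:0. */
static uint32_t pic_read(uint64_t pic, int upper)
{
	return (uint32_t)(upper ? pic >> 32 : pic);
}

static uint64_t pic_write(uint64_t pic, int upper, uint32_t val)
{
	int shift = upper ? 32 : 0;
	uint64_t mask = (uint64_t)0xffffffff << shift;

	return (pic & ~mask) | ((uint64_t)val << shift);
}

int main(void)
{
	uint64_t pic = 0;

	pic = pic_write(pic, 1, 0x11111111);	/* upper counter */
	pic = pic_write(pic, 0, 0x22222222);	/* lower counter */
	printf("upper=%08x lower=%08x\n", pic_read(pic, 1), pic_read(pic, 0));
	return 0;
}
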
static int sparc_perf_counter_set_period(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > MAX_PERIOD)
left = MAX_PERIOD;
atomic64_set(&hwc->prev_count, (u64)-left);
write_pmc(idx, (u64)(-left) & 0xffffffff);
perf_counter_update_userpage(counter);
return ret;
}
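
sparc_perf_counter_set_period() arms the hardware by writing the two's complement of the remaining period, so the 32-bit counter wraps to zero, clearing bit 31, after exactly 'left' more events; the NMI handler below treats a cleared bit 31 as the overflow indication. A small sketch (not part of the patch) of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t left = 100000;			/* events until the next sample */
	uint32_t pmc = (uint32_t)(-left);	/* value programmed into the counter */
	uint32_t after = pmc + 100000;		/* counter state 'left' events later */

	/* pmc starts at 0xfffe7960 (bit 31 set); after 100000 increments it
	 * wraps to 0, which the overflow handler recognizes as a sample. */
	printf("start=%#x end=%#x\n", pmc, after);
	return 0;
}
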
static int sparc_pmu_enable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
struct hw_perf_counter *hwc = &counter->hw;
int idx = hwc->idx;
if (test_and_set_bit(idx, cpuc->used_mask))
return -EAGAIN;
sparc_pmu_disable_counter(hwc, idx);
cpuc->counters[idx] = counter;
set_bit(idx, cpuc->active_mask);
sparc_perf_counter_set_period(counter, hwc, idx);
sparc_pmu_enable_counter(hwc, idx);
perf_counter_update_userpage(counter);
return 0;
}
static u64 sparc_perf_counter_update(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
int shift = 64 - 32;
u64 prev_raw_count, new_raw_count;
s64 delta;
again:
prev_raw_count = atomic64_read(&hwc->prev_count);
new_raw_count = read_pmc(idx);
if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
atomic64_add(delta, &counter->count);
atomic64_sub(delta, &hwc->period_left);
return new_raw_count;
}
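
sparc_perf_counter_update() only gets 32 significant bits back from read_pmc(), so it shifts both snapshots up by 32 before subtracting and arithmetic-shifts the result back down; the delta is therefore computed modulo 2^32 and stays correct even if the counter wrapped between two reads. A sketch (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int shift = 64 - 32;
	uint64_t prev = 0xfffffff0;	/* previous snapshot, close to wrapping */
	uint64_t now  = 0x00000010;	/* counter wrapped since the last read */
	int64_t delta = (now << shift) - (prev << shift);

	delta >>= shift;		/* arithmetic shift back: delta == 0x20 */
	printf("delta = %lld\n", (long long)delta);
	return 0;
}
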
static void sparc_pmu_disable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
struct hw_perf_counter *hwc = &counter->hw;
int idx = hwc->idx;
clear_bit(idx, cpuc->active_mask);
sparc_pmu_disable_counter(hwc, idx);
barrier();
sparc_perf_counter_update(counter, hwc, idx);
cpuc->counters[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_counter_update_userpage(counter);
}
static void sparc_pmu_read(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
sparc_perf_counter_update(counter, hwc, hwc->idx);
}
static void sparc_pmu_unthrottle(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
sparc_pmu_enable_counter(hwc, hwc->idx);
}
static atomic_t active_counters = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
void perf_counter_grab_pmc(void)
{
if (atomic_inc_not_zero(&active_counters))
return;
mutex_lock(&pmc_grab_mutex);
if (atomic_read(&active_counters) == 0) {
if (atomic_read(&nmi_active) > 0) {
on_each_cpu(stop_nmi_watchdog, NULL, 1);
BUG_ON(atomic_read(&nmi_active) != 0);
}
atomic_inc(&active_counters);
}
mutex_unlock(&pmc_grab_mutex);
}
void perf_counter_release_pmc(void)
{
if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
if (atomic_read(&nmi_active) == 0)
on_each_cpu(start_nmi_watchdog, NULL, 1);
mutex_unlock(&pmc_grab_mutex);
}
}
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
perf_counter_release_pmc();
}
static int __hw_perf_counter_init(struct perf_counter *counter)
{
struct perf_counter_attr *attr = &counter->attr;
struct hw_perf_counter *hwc = &counter->hw;
const struct perf_event_map *pmap;
u64 enc;
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
if (attr->type != PERF_TYPE_HARDWARE)
return -EOPNOTSUPP;
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
perf_counter_grab_pmc();
counter->destroy = hw_perf_counter_destroy;
/* We save the enable bits in the config_base. So to
* turn off sampling just write 'config', and to enable
* things write 'config | config_base'.
*/
hwc->config_base = sparc_pmu->irq_bit;
if (!attr->exclude_user)
hwc->config_base |= PCR_UTRACE;
if (!attr->exclude_kernel)
hwc->config_base |= PCR_STRACE;
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
if (!hwc->sample_period) {
hwc->sample_period = MAX_PERIOD;
hwc->last_period = hwc->sample_period;
atomic64_set(&hwc->period_left, hwc->sample_period);
}
pmap = sparc_pmu->event_map(attr->config);
enc = pmap->encoding;
if (pmap->pic_mask & PIC_UPPER) {
hwc->idx = PIC_UPPER_INDEX;
enc <<= sparc_pmu->upper_shift;
} else {
hwc->idx = PIC_LOWER_INDEX;
enc <<= sparc_pmu->lower_shift;
}
hwc->config |= enc;
return 0;
}
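
As the comment above notes, the trace-enable bits (PCR_UTRACE, PCR_STRACE and, where present, the hypervisor and interrupt bits) live in config_base while the event selection lives in config, so writing 'config' alone parks the counter and 'config | config_base' runs it. A rough sketch (not part of the patch; the bit values below are made-up stand-ins, not the real PCR layout):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field values for illustration only. */
#define EX_UTRACE	0x01		/* count user mode */
#define EX_STRACE	0x02		/* count system (kernel) mode */
#define EX_EVENT	(0x09 << 4)	/* event selection */

int main(void)
{
	uint64_t config      = EX_EVENT;		/* what to count */
	uint64_t config_base = EX_UTRACE | EX_STRACE;	/* when to count */

	printf("counter parked:  pcr = %#llx\n", (unsigned long long)config);
	printf("counter running: pcr = %#llx\n",
	       (unsigned long long)(config | config_base));
	return 0;
}
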
static const struct pmu pmu = {
.enable = sparc_pmu_enable,
.disable = sparc_pmu_disable,
.read = sparc_pmu_read,
.unthrottle = sparc_pmu_unthrottle,
};
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
int err = __hw_perf_counter_init(counter);
if (err)
return ERR_PTR(err);
return &pmu;
}
void perf_counter_print_debug(void)
{
unsigned long flags;
u64 pcr, pic;
int cpu;
if (!sparc_pmu)
return;
local_irq_save(flags);
cpu = smp_processor_id();
pcr = pcr_ops->read();
read_pic(pic);
pr_info("\n");
pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
cpu, pcr, pic);
local_irq_restore(flags);
}
static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct perf_sample_data data;
struct cpu_hw_counters *cpuc;
struct pt_regs *regs;
int idx;
if (!atomic_read(&active_counters))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
default:
return NOTIFY_DONE;
}
regs = args->regs;
data.regs = regs;
data.addr = 0;
cpuc = &__get_cpu_var(cpu_hw_counters);
for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
struct perf_counter *counter = cpuc->counters[idx];
struct hw_perf_counter *hwc;
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
hwc = &counter->hw;
val = sparc_perf_counter_update(counter, hwc, idx);
if (val & (1ULL << 31))
continue;
data.period = counter->hw.last_period;
if (!sparc_perf_counter_set_period(counter, hwc, idx))
continue;
if (perf_counter_overflow(counter, 1, &data))
sparc_pmu_disable_counter(hwc, idx);
}
return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
.notifier_call = perf_counter_nmi_handler,
};
static bool __init supported_pmu(void)
{
if (!strcmp(sparc_pmu_type, "ultra3i")) {
sparc_pmu = &ultra3i_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara2")) {
sparc_pmu = &niagara2_pmu;
return true;
}
return false;
}
void __init init_hw_perf_counters(void)
{
pr_info("Performance counters: ");
if (!supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
return;
}
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
/* All sparc64 PMUs currently have 2 counters. But this simple
* driver only supports one active counter at a time.
*/
perf_max_counters = 1;
register_die_notifier(&perf_counter_nmi_notifier);
}
...@@ -24,6 +24,8 @@
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "prom.h"
...@@ -131,6 +133,35 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs->which_io, regs->phys_addr);
}
/* "name:vendor:device@irq,addrlo" */
static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
{
struct amba_prom_registers *regs;
unsigned int *intr;
unsigned int *device, *vendor;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
prop = of_find_property(dp, "interrupts", NULL);
if (!prop)
return;
intr = prop->value;
prop = of_find_property(dp, "vendor", NULL);
if (!prop)
return;
vendor = prop->value;
prop = of_find_property(dp, "device", NULL);
if (!prop)
return;
device = prop->value;
sprintf(tmp_buf, "%s:%d:%d@%x,%x",
dp->name, *vendor, *device,
*intr, regs->phys_addr);
}
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
{
struct device_node *parent = dp->parent;
...@@ -143,6 +174,8 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
return sbus_path_component(dp, tmp_buf);
if (!strcmp(parent->type, "ebus"))
return ebus_path_component(dp, tmp_buf);
if (!strcmp(parent->type, "ambapp"))
return ambapp_path_component(dp, tmp_buf);
/* "isa" is handled with platform naming */
}
...
...@@ -22,9 +22,12 @@
#include <linux/of.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/leon.h>
#include "prom.h"
void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
struct device_node *of_console_device;
EXPORT_SYMBOL(of_console_device);
...@@ -161,7 +164,7 @@ static struct property * __init build_one_prop(phandle node, char *prev,
name = prom_nextprop(node, prev, p->name);
}
-if (strlen(name) == 0) {
+if (!name || strlen(name) == 0) {
tmp = p;
return NULL;
}
...@@ -242,7 +245,7 @@ static struct device_node * __init prom_create_node(phandle node,
return dp;
}
-static char * __init build_full_name(struct device_node *dp)
+char * __init build_full_name(struct device_node *dp)
{
int len, ourlen, plen;
char *n;
...@@ -289,6 +292,9 @@ static struct device_node * __init prom_build_tree(struct device_node *parent,
dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
if (prom_build_more)
prom_build_more(dp, nextp);
node = prom_getsibling(node);
}
...
...@@ -235,6 +235,8 @@ void __init setup_arch(char **cmdline_p)
sparc_cpu_model = sun4e;
if (!strcmp(&cputypval,"sun4u"))
sparc_cpu_model = sun4u;
if (!strncmp(&cputypval, "leon" , 4))
sparc_cpu_model = sparc_leon;
printk("ARCH: ");
switch(sparc_cpu_model) {
...@@ -256,6 +258,9 @@
case sun4u:
printk("SUN4U\n");
break;
case sparc_leon:
printk("LEON\n");
break;
default:
printk("UNKNOWN!\n");
break;
...
...@@ -2,6 +2,7 @@
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
*/
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
...
...@@ -82,5 +82,5 @@ sys_call_table:
/*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
...@@ -83,7 +83,7 @@ sys_call_table32:
/*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
#endif /* CONFIG_COMPAT */
...@@ -158,4 +158,4 @@ sys_call_table:
/*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo
+	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
...@@ -11,6 +11,7 @@ obj-$(CONFIG_SPARC32) += loadmmu.o
obj-y += generic_$(BITS).o
obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
...
...@@ -34,6 +34,7 @@
#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/leon.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
...@@ -326,6 +327,9 @@ void __init paging_init(void)
sparc_unmapped_base = 0xe0000000;
BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
break;
case sparc_leon:
leon_init();
/* fall through */
case sun4m:
case sun4d:
srmmu_paging_init();
...
/*
* linux/arch/sparc/mm/leon_mm.c
*
* Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
* Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
* Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
*
* do srmmu probe in software
*
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/tlbflush.h>
int leon_flush_during_switch = 1;
int srmmu_swprobe_trace;
unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
{
unsigned int ctxtbl;
unsigned int pgd, pmd, ped;
unsigned int ptr;
unsigned int lvl, pte, paddrbase;
unsigned int ctx;
unsigned int paddr_calc;
paddrbase = 0;
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: trace on\n");
ctxtbl = srmmu_get_ctable_ptr();
if (!(ctxtbl)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
return 0;
}
if (!_pfn_valid(PFN(ctxtbl))) {
if (srmmu_swprobe_trace)
printk(KERN_INFO
"swprobe: !_pfn_valid(%x)=>0\n",
PFN(ctxtbl));
return 0;
}
ctx = srmmu_get_context();
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);
pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: pgd is entry level 3\n");
lvl = 3;
pte = pgd;
paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
return 0;
}
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);
ptr = (pgd & SRMMU_PTD_PMASK) << 4;
ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
if (!_pfn_valid(PFN(ptr)))
return 0;
pmd = LEON_BYPASS_LOAD_PA(ptr);
if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: pmd is entry level 2\n");
lvl = 2;
pte = pmd;
paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
return 0;
}
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);
ptr = (pmd & SRMMU_PTD_PMASK) << 4;
ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
if (!_pfn_valid(PFN(ptr))) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
PFN(ptr));
return 0;
}
ped = LEON_BYPASS_LOAD_PA(ptr);
if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: ped is entry level 1\n");
lvl = 1;
pte = ped;
paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: ped is invalid => 0\n");
return 0;
}
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);
ptr = (ped & SRMMU_PTD_PMASK) << 4;
ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
if (!_pfn_valid(PFN(ptr)))
return 0;
ptr = LEON_BYPASS_LOAD_PA(ptr);
if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: ptr is entry level 0\n");
lvl = 0;
pte = ptr;
paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
return 0;
ready:
switch (lvl) {
case 0:
paddr_calc =
(vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
break;
case 1:
paddr_calc =
(vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
break;
case 2:
paddr_calc =
(vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
break;
default:
case 3:
paddr_calc = vaddr;
break;
}
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
if (paddr)
*paddr = paddr_calc;
return paddrbase;
}
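
srmmu_swprobe() walks the SRMMU tables entirely in software, returning the physical base taken from the matching entry (0 if the address is unmapped) and optionally the fully computed physical address through *paddr. A hypothetical in-kernel caller (not part of the patch; 'example_translate' is a made-up helper relying on the includes already present in this file) might look like:

/* Illustrative sketch only: translate a virtual address by hand and log
 * the result. */
static void example_translate(unsigned long vaddr)
{
	unsigned long paddr;

	if (srmmu_swprobe(vaddr, &paddr))
		printk(KERN_INFO "swprobe example: va %08lx -> pa %08lx\n",
		       vaddr, paddr);
	else
		printk(KERN_INFO "swprobe example: va %08lx not mapped\n",
		       vaddr);
}
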
void leon_flush_icache_all(void)
{
__asm__ __volatile__(" flush "); /*iflush*/
}
void leon_flush_dcache_all(void)
{
__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
"i"(ASI_LEON_DFLUSH) : "memory");
}
void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
{
if (vma->vm_flags & VM_EXEC)
leon_flush_icache_all();
leon_flush_dcache_all();
}
void leon_flush_cache_all(void)
{
__asm__ __volatile__(" flush "); /*iflush*/
__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
"i"(ASI_LEON_DFLUSH) : "memory");
}
void leon_flush_tlb_all(void)
{
leon_flush_cache_all();
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
"i"(ASI_LEON_MMUFLUSH) : "memory");
}
/* get all cache regs */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
unsigned long ccr, iccr, dccr;
if (!regs)
return;
/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
"mov 0x08, %%g1\n\t"
"lda [%%g1] %3, %1\n\t"
"mov 0x0c, %%g1\n\t"
"lda [%%g1] %3, %2\n\t"
: "=r"(ccr), "=r"(iccr), "=r"(dccr)
/* output */
: "i"(ASI_LEON_CACHEREGS) /* input */
: "g1" /* clobber list */
);
regs->ccr = ccr;
regs->iccr = iccr;
regs->dccr = dccr;
}
/* Due to virtual cache we need to check cache configuration if
* it is possible to skip flushing in some cases.
*
* Leon2 and Leon3 differ in their way of telling cache information
*
*/
int leon_flush_needed(void)
{
int flush_needed = -1;
unsigned int ssize, sets;
char *setStr[4] =
{ "direct mapped", "2-way associative", "3-way associative",
"4-way associative"
};
/* leon 3 */
struct leon3_cacheregs cregs;
leon3_getCacheRegs(&cregs);
sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
sets > 3 ? "unknown" : setStr[sets], ssize);
if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
/* Set Size <= Page size ==>
flush on every context switch not needed. */
flush_needed = 0;
printk(KERN_INFO "CACHE: not flushing on every context switch\n");
}
return flush_needed;
}
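
The decision above boils down to two DCCR-derived numbers: if the data cache is direct mapped and its set size is no larger than PAGE_SIZE, the per-context-switch flush can be skipped; otherwise it stays on. A worked sketch (not part of the patch), assuming 4 kB pages:

#include <stdio.h>

int main(void)
{
	unsigned int page_kb = 4;	/* PAGE_SIZE / 1024 on sparc32 */
	unsigned int sets = 0;		/* DCCR sets field: 0 => direct mapped */
	unsigned int ssize_field = 2;	/* DCCR set-size field: 2 => 4 kB */
	unsigned int ssize_kb = 1 << ssize_field;

	if (ssize_kb <= page_kb && sets == 0)
		printf("no flush needed on context switch\n");
	else
		printf("flush on every context switch\n");
	return 0;
}
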
void leon_switch_mm(void)
{
flush_tlb_mm((void *)0);
if (leon_flush_during_switch)
leon_flush_cache_all();
}
...@@ -33,6 +33,7 @@ void __init load_mmu(void)
break;
case sun4m:
case sun4d:
case sparc_leon:
ld_mmu_srmmu();
break;
default:
...
...@@ -46,6 +46,7 @@
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>
#include <asm/btfixup.h>
...@@ -569,6 +570,9 @@ static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
}
if (sparc_cpu_model == sparc_leon)
leon_switch_mm();
if (is_hypersparc)
hyper_flush_whole_icache();
...@@ -1977,6 +1981,45 @@ static void __init init_viking(void)
poke_srmmu = poke_viking;
}
#ifdef CONFIG_SPARC_LEON
void __init poke_leonsparc(void)
{
}
void __init init_leon(void)
{
srmmu_name = "Leon";
BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
poke_srmmu = poke_leonsparc;
srmmu_cache_pagetables = 0;
leon_flush_during_switch = leon_flush_needed();
}
#endif
/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
...@@ -1992,7 +2035,15 @@ static void __init get_srmmu_type(void)
psr_typ = (psr >> 28) & 0xf;
psr_vers = (psr >> 24) & 0xf;
-/* First, check for HyperSparc or Cypress. */
+/* First, check for sparc-leon. */
if (sparc_cpu_model == sparc_leon) {
psr_typ = 0xf; /* hardcoded ids for older models/simulators */
psr_vers = 2;
init_leon();
return;
}
/* Second, check for HyperSparc or Cypress. */
if(mod_typ == 1) {
switch(mod_rev) {
case 7:
...
...@@ -21,7 +21,7 @@
static int profile_timer_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
-struct die_args *args = (struct die_args *)data;
+struct die_args *args = data;
int ret = NOTIFY_DONE;
switch (val) {
...@@ -57,7 +57,7 @@ static void timer_stop(void)
static int op_nmi_timer_init(struct oprofile_operations *ops)
{
-if (!nmi_usable)
+if (atomic_read(&nmi_active) <= 0)
return -ENODEV;
ops->start = timer_start;
...
...@@ -23,6 +23,7 @@
#include <linux/kprobes.h>
#include <linux/ktime.h>
#include <linux/limits.h>
#include <linux/sched.h>
static char func_name[NAME_MAX] = "do_fork";
module_param_string(func, func_name, NAME_MAX, S_IRUGO);
...
...@@ -41,6 +41,12 @@
#define cpu_relax() asm volatile("" ::: "memory");
#endif
#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb() asm volatile("":::"memory")
#define cpu_relax() asm volatile("":::"memory")
#endif
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
...