Commit f589e9bf authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc updates from David Miller:

 1) Hibernation support, as well as removal of excess interrupt
    twiddling in MMU context allocation on sparc64 from Kirill Tkhai.

 2) Kill references to __ARCH_WANT_UNLOCKED_CTXSW.

 3) Sparc32 LEON bug fixes from Daniel Hellstrom and Andreas Larsson.

 4) Provide cmpxchg64(), from Geert Uytterhoeven.

 5) Device refcount and registry bug fixes from Federico Vaga and Wei
    Yongjun.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next:
  serial: sunsu: add missing platform_driver_unregister() when module exit
  sparc32, leon: Do not overwrite previously set irq flow handlers
  sparc/kernel/vio.c: add put_device() after device_find_child()
  sparc64: Do not save/restore interrupts in get_new_mmu_context()
  sparc: Consistently use 'wr' and 'rd' instructions for ASRs.
  sparc64: Kill __ARCH_WANT_UNLOCKED_CTXSW
  sparc64: Provide cmpxchg64()
  sparc64: Do not change num_physpages during initmem freeing
  sparc64: Hibernation support
  sparc,leon: updated GRPCI2 config name
  sparc,leon: support for GRPCI1 PCI host bridge controller
  sparc32,leon: add support for PCI busn resource for GRPCI2
parents 17319295 048c9acc
......@@ -99,6 +99,9 @@ config HAVE_LATENCYTOP_SUPPORT
bool
default y if SPARC64
config ARCH_HIBERNATION_POSSIBLE
def_bool y if SPARC64
config AUDIT_ARCH
bool
default y
......@@ -303,6 +306,10 @@ config ARCH_SPARSEMEM_DEFAULT
source "mm/Kconfig"
if SPARC64
source "kernel/power/Kconfig"
endif
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SPARC64 && SMP
......@@ -472,7 +479,18 @@ config LEON_PCI
depends on PCI && SPARC_LEON
default y
config GRPCI2
config SPARC_GRPCI1
bool "GRPCI Host Bridge Support"
depends on LEON_PCI
default y
help
Say Y here to include the GRPCI Host Bridge Driver. The GRPCI
PCI host controller is typically found in GRLIB SPARC32/LEON
systems. The driver has one property (all_pci_errors) controlled
from the bootloader that makes the GRPCI generate interrupts
on detected PCI Parity and System errors.
config SPARC_GRPCI2
bool "GRPCI2 Host Bridge Support"
depends on LEON_PCI
default y
......
......@@ -57,6 +57,7 @@ core-y += arch/sparc/
libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
boot := arch/sparc/boot
......
......@@ -141,5 +141,6 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */
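For context, a minimal sketch (hypothetical, not part of the patch) of how the newly provided cmpxchg64() macro might be used for a lock-free 64-bit update; sample_counter and sample_add64 are illustrative names only:

#include <linux/atomic.h>
#include <linux/types.h>

static u64 sample_counter;

/* Atomically add delta to sample_counter using a compare-and-swap loop:
 * retry until cmpxchg64() observes the value we read and installs new.
 */
static void sample_add64(u64 delta)
{
	u64 old, new;

	do {
		old = sample_counter;
		new = old + delta;
	} while (cmpxchg64(&sample_counter, old, new) != old);
}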
......@@ -55,15 +55,15 @@
/* The Get Condition Codes software trap for userland. */
#define GETCC_TRAP \
b getcc_trap_handler; mov %psr, %l0; nop; nop;
b getcc_trap_handler; rd %psr, %l0; nop; nop;
/* The Set Condition Codes software trap for userland. */
#define SETCC_TRAP \
b setcc_trap_handler; mov %psr, %l0; nop; nop;
b setcc_trap_handler; rd %psr, %l0; nop; nop;
/* The Get PSR software trap for userland. */
#define GETPSR_TRAP \
mov %psr, %i0; jmp %l2; rett %l2 + 4; nop;
rd %psr, %i0; jmp %l2; rett %l2 + 4; nop;
/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
* gets handled with another macro.
......
/*
* hibernate.h: Hibernation support specific to sparc64.
*
* Copyright (C) 2013 Kirill V Tkhai (tkhai@yandex.ru)
*/
#ifndef ___SPARC_HIBERNATE_H
#define ___SPARC_HIBERNATE_H
struct saved_context {
unsigned long fp;
unsigned long cwp;
unsigned long wstate;
unsigned long tick;
unsigned long pstate;
unsigned long g4;
unsigned long g5;
unsigned long g6;
};
#endif
......@@ -12,6 +12,7 @@ struct leon_pci_info {
struct pci_ops *ops;
struct resource io_space;
struct resource mem_space;
struct resource busn;
int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
};
......
......@@ -68,7 +68,7 @@ extern void smp_tsb_sync(struct mm_struct *mm);
extern void __flush_tlb_mm(unsigned long, unsigned long);
/* Switch the current MM context. Interrupts are disabled. */
/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid, flags;
......
......@@ -18,9 +18,6 @@
#include <asm/ptrace.h>
#include <asm/page.h>
/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
/* The sparc has no problems with write protection */
#define wp_works_ok 1
#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
......
......@@ -74,7 +74,8 @@ obj-y += dma.o
obj-$(CONFIG_PCIC_PCI) += pcic.o
obj-$(CONFIG_LEON_PCI) += leon_pci.o
obj-$(CONFIG_GRPCI2) += leon_pci_grpci2.o
obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o
obj-$(CONFIG_SPARC_GRPCI1)+= leon_pci_grpci1.o
obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o
obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o
......
......@@ -14,6 +14,8 @@
// #include <linux/mm.h>
#include <linux/kbuild.h>
#include <asm/hibernate.h>
#ifdef CONFIG_SPARC32
int sparc32_foo(void)
{
......@@ -24,6 +26,19 @@ int sparc32_foo(void)
#else
int sparc64_foo(void)
{
#ifdef CONFIG_HIBERNATION
BLANK();
OFFSET(SC_REG_FP, saved_context, fp);
OFFSET(SC_REG_CWP, saved_context, cwp);
OFFSET(SC_REG_WSTATE, saved_context, wstate);
OFFSET(SC_REG_TICK, saved_context, tick);
OFFSET(SC_REG_PSTATE, saved_context, pstate);
OFFSET(SC_REG_G4, saved_context, g4);
OFFSET(SC_REG_G5, saved_context, g5);
OFFSET(SC_REG_G6, saved_context, g6);
#endif
return 0;
}
#endif
......
......@@ -213,6 +213,7 @@ unsigned int leon_build_device_irq(unsigned int real_irq,
{
unsigned int irq;
unsigned long mask;
struct irq_desc *desc;
irq = 0;
mask = leon_get_irqmask(real_irq);
......@@ -226,9 +227,12 @@ unsigned int leon_build_device_irq(unsigned int real_irq,
if (do_ack)
mask |= LEON_DO_ACK_HW;
irq_set_chip_and_handler_name(irq, &leon_irq,
flow_handler, name);
irq_set_chip_data(irq, (void *)mask);
desc = irq_to_desc(irq);
if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) {
irq_set_chip_and_handler_name(irq, &leon_irq,
flow_handler, name);
irq_set_chip_data(irq, (void *)mask);
}
out:
return irq;
......
......@@ -29,6 +29,8 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
pci_add_resource_offset(&resources, &info->io_space,
info->io_space.start - 0x1000);
pci_add_resource(&resources, &info->mem_space);
info->busn.flags = IORESOURCE_BUS;
pci_add_resource(&resources, &info->busn);
root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info,
&resources);
......
/*
* leon_pci_grpci1.c: GRPCI1 Host PCI driver
*
* Copyright (C) 2013 Aeroflex Gaisler AB
*
* This GRPCI1 driver does not support PCI interrupts taken from
* GPIO pins. Interrupt generation at PCI parity and system error
* detection is by default turned off since some GRPCI1 cores do
* not support detection. It can be turned on from the bootloader
* using the all_pci_errors property.
*
* Contributors: Daniel Hellstrom <daniel@gaisler.com>
*/
#include <linux/of_device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/leon_pci.h>
#include <asm/sections.h>
#include <asm/vaddrs.h>
#include <asm/leon.h>
#include <asm/io.h>
#include "irq.h"
/* Enable/Disable Debugging Configuration Space Access */
#undef GRPCI1_DEBUG_CFGACCESS
/*
* GRPCI1 APB Register MAP
*/
struct grpci1_regs {
unsigned int cfg_stat; /* 0x00 Configuration / Status */
unsigned int bar0; /* 0x04 BAR0 (RO) */
unsigned int page0; /* 0x08 PAGE0 (RO) */
unsigned int bar1; /* 0x0C BAR1 (RO) */
unsigned int page1; /* 0x10 PAGE1 */
unsigned int iomap; /* 0x14 IO Map */
unsigned int stat_cmd; /* 0x18 PCI Status & Command (RO) */
unsigned int irq; /* 0x1C Interrupt register */
};
#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
#define PAGE0_BTEN_BIT 0
#define PAGE0_BTEN (1 << PAGE0_BTEN_BIT)
#define CFGSTAT_HOST_BIT 13
#define CFGSTAT_CTO_BIT 8
#define CFGSTAT_HOST (1 << CFGSTAT_HOST_BIT)
#define CFGSTAT_CTO (1 << CFGSTAT_CTO_BIT)
#define IRQ_DPE (1 << 9)
#define IRQ_SSE (1 << 8)
#define IRQ_RMA (1 << 7)
#define IRQ_RTA (1 << 6)
#define IRQ_STA (1 << 5)
#define IRQ_DPED (1 << 4)
#define IRQ_INTD (1 << 3)
#define IRQ_INTC (1 << 2)
#define IRQ_INTB (1 << 1)
#define IRQ_INTA (1 << 0)
#define IRQ_DEF_ERRORS (IRQ_RMA | IRQ_RTA | IRQ_STA)
#define IRQ_ALL_ERRORS (IRQ_DPED | IRQ_DEF_ERRORS | IRQ_SSE | IRQ_DPE)
#define IRQ_INTX (IRQ_INTA | IRQ_INTB | IRQ_INTC | IRQ_INTD)
#define IRQ_MASK_BIT 16
#define DEF_PCI_ERRORS (PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_REC_TARGET_ABORT | \
PCI_STATUS_REC_MASTER_ABORT)
#define ALL_PCI_ERRORS (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_SIG_SYSTEM_ERROR | DEF_PCI_ERRORS)
#define TGT 256
struct grpci1_priv {
struct leon_pci_info info; /* must be the first member of this structure */
struct grpci1_regs *regs; /* GRPCI register map */
struct device *dev;
int pci_err_mask; /* STATUS register error mask */
int irq; /* LEON irqctrl GRPCI IRQ */
unsigned char irq_map[4]; /* GRPCI nexus PCI INTX# IRQs */
unsigned int irq_err; /* GRPCI nexus Virt Error IRQ */
/* AHB PCI Windows */
unsigned long pci_area; /* MEMORY */
unsigned long pci_area_end;
unsigned long pci_io; /* I/O */
unsigned long pci_conf; /* CONFIGURATION */
unsigned long pci_conf_end;
unsigned long pci_io_va;
};
static struct grpci1_priv *grpci1priv;
static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val);
int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct grpci1_priv *priv = dev->bus->sysdata;
int irq_group;
/* Use default IRQ decoding on PCI BUS0 according to slot numbering */
irq_group = slot & 0x3;
pin = ((pin - 1) + irq_group) & 0x3;
return priv->irq_map[pin];
}
static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 *pci_conf, tmp, cfg;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
cfg = REGLOAD(priv->regs->cfg_stat);
REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23));
/* do read access */
pci_conf = (u32 *) (priv->pci_conf | (devfn << 8) | (where & 0xfc));
tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
/* check if master abort was received */
if (REGLOAD(priv->regs->cfg_stat) & CFGSTAT_CTO) {
*val = 0xffffffff;
/* Clear Master abort bit in PCI cfg space (is set) */
tmp = REGLOAD(priv->regs->stat_cmd);
grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp);
} else {
/* Bus always little endian (unaffected by byte-swapping) */
*val = flip_dword(tmp);
}
return 0;
}
static int grpci1_cfg_r16(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
if (where & 0x1)
return -EINVAL;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xffff & (v >> (8 * (where & 0x3)));
return ret;
}
static int grpci1_cfg_r8(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xff & (v >> (8 * (where & 3)));
return ret;
}
static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
unsigned int *pci_conf;
u32 cfg;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
cfg = REGLOAD(priv->regs->cfg_stat);
REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23));
pci_conf = (unsigned int *) (priv->pci_conf |
(devfn << 8) | (where & 0xfc));
LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
return 0;
}
static int grpci1_cfg_w16(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
if (where & 0x1)
return -EINVAL;
ret = grpci1_cfg_r32(priv, bus, devfn, where&~3, &v);
if (ret)
return ret;
v = (v & ~(0xffff << (8 * (where & 0x3)))) |
((0xffff & val) << (8 * (where & 0x3)));
return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
static int grpci1_cfg_w8(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
if (ret != 0)
return ret;
v = (v & ~(0xff << (8 * (where & 0x3)))) |
((0xff & val) << (8 * (where & 0x3)));
return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
/* Read from Configuration Space. When entering here the PCI layer has taken
* the pci_lock spinlock and IRQ is off.
*/
static int grpci1_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct grpci1_priv *priv = grpci1priv;
unsigned int busno = bus->number;
int ret;
if (PCI_SLOT(devfn) > 15 || busno > 15) {
*val = ~0;
return 0;
}
switch (size) {
case 1:
ret = grpci1_cfg_r8(priv, busno, devfn, where, val);
break;
case 2:
ret = grpci1_cfg_r16(priv, busno, devfn, where, val);
break;
case 4:
ret = grpci1_cfg_r32(priv, busno, devfn, where, val);
break;
default:
ret = -EINVAL;
break;
}
#ifdef GRPCI1_DEBUG_CFGACCESS
printk(KERN_INFO
"grpci1_read_config: [%02x:%02x:%x] ofs=%d val=%x size=%d\n",
busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, *val, size);
#endif
return ret;
}
/* Write to Configuration Space. When entering here the PCI layer has taken
* the pci_lock spinlock and IRQ is off.
*/
static int grpci1_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct grpci1_priv *priv = grpci1priv;
unsigned int busno = bus->number;
if (PCI_SLOT(devfn) > 15 || busno > 15)
return 0;
#ifdef GRPCI1_DEBUG_CFGACCESS
printk(KERN_INFO
"grpci1_write_config: [%02x:%02x:%x] ofs=%d size=%d val=%x\n",
busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
#endif
switch (size) {
default:
return -EINVAL;
case 1:
return grpci1_cfg_w8(priv, busno, devfn, where, val);
case 2:
return grpci1_cfg_w16(priv, busno, devfn, where, val);
case 4:
return grpci1_cfg_w32(priv, busno, devfn, where, val);
}
}
static struct pci_ops grpci1_ops = {
.read = grpci1_read_config,
.write = grpci1_write_config,
};
/* GENIRQ IRQ chip implementation for grpci1 irqmode=0..2. In configuration
* 3, where all PCI Interrupts have a separate IRQ on the system IRQ controller,
* this is not needed and the standard IRQ controller can be used.
*/
static void grpci1_mask_irq(struct irq_data *data)
{
u32 irqidx;
struct grpci1_priv *priv = grpci1priv;
irqidx = (u32)data->chip_data - 1;
if (irqidx > 3) /* only mask PCI interrupts here */
return;
irqidx += IRQ_MASK_BIT;
REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) & ~(1 << irqidx));
}
static void grpci1_unmask_irq(struct irq_data *data)
{
u32 irqidx;
struct grpci1_priv *priv = grpci1priv;
irqidx = (u32)data->chip_data - 1;
if (irqidx > 3) /* only unmask PCI interrupts here */
return;
irqidx += IRQ_MASK_BIT;
REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) | (1 << irqidx));
}
static unsigned int grpci1_startup_irq(struct irq_data *data)
{
grpci1_unmask_irq(data);
return 0;
}
static void grpci1_shutdown_irq(struct irq_data *data)
{
grpci1_mask_irq(data);
}
static struct irq_chip grpci1_irq = {
.name = "grpci1",
.irq_startup = grpci1_startup_irq,
.irq_shutdown = grpci1_shutdown_irq,
.irq_mask = grpci1_mask_irq,
.irq_unmask = grpci1_unmask_irq,
};
/* Handle one or multiple IRQs from the PCI core */
static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
{
struct grpci1_priv *priv = grpci1priv;
int i, ack = 0;
unsigned int irqreg;
irqreg = REGLOAD(priv->regs->irq);
irqreg = (irqreg >> IRQ_MASK_BIT) & irqreg;
/* Error Interrupt? */
if (irqreg & IRQ_ALL_ERRORS) {
generic_handle_irq(priv->irq_err);
ack = 1;
}
/* PCI Interrupt? */
if (irqreg & IRQ_INTX) {
/* Call respective PCI Interrupt handler */
for (i = 0; i < 4; i++) {
if (irqreg & (1 << i))
generic_handle_irq(priv->irq_map[i]);
}
ack = 1;
}
/*
* Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ
* Controller, this must be done after IRQ sources have been handled to
* avoid double IRQ generation
*/
if (ack)
desc->irq_data.chip->irq_eoi(&desc->irq_data);
}
/* Create a virtual IRQ */
static unsigned int grpci1_build_device_irq(unsigned int irq)
{
unsigned int virq = 0, pil;
pil = 1 << 8;
virq = irq_alloc(irq, pil);
if (virq == 0)
goto out;
irq_set_chip_and_handler_name(virq, &grpci1_irq, handle_simple_irq,
"pcilvl");
irq_set_chip_data(virq, (void *)irq);
out:
return virq;
}
/*
* Initialize mappings AMBA<->PCI, clear IRQ state, setup PCI interface
*
* Target BARs:
* BAR0: unused in this implementation
* BAR1: peripheral DMA to host's memory (size at least 256MByte)
* BAR2..BAR5: not implemented in hardware
*/
void grpci1_hw_init(struct grpci1_priv *priv)
{
u32 ahbadr, bar_sz, data, pciadr;
struct grpci1_regs *regs = priv->regs;
/* set 1:1 mapping between AHB -> PCI memory space */
REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000);
/* map PCI accesses to target BAR1 to Linux kernel memory 1:1 */
ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN((unsigned long) &_end));
REGSTORE(regs->page1, ahbadr);
/* translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
REGSTORE(regs->iomap, REGLOAD(regs->iomap) & 0x0000ffff);
/* disable and clear pending interrupts */
REGSTORE(regs->irq, 0);
/* Setup BAR0 outside access range so that it does not conflict with
* peripheral DMA. There is no need to set up the PAGE0 register.
*/
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
grpci1_cfg_r32(priv, TGT, 0, PCI_BASE_ADDRESS_0, &bar_sz);
bar_sz = ~bar_sz + 1;
pciadr = priv->pci_area - bar_sz;
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, pciadr);
/*
* Setup the Host's PCI Target BAR1 for other peripherals to access,
* and do DMA to the host's memory.
*/
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_1, ahbadr);
/*
* Setup Latency Timer and cache line size. Default cache line
* size will result in poor performance (256 word fetches); 0xff
* will set it according to the max size of the PCI FIFO.
*/
grpci1_cfg_w8(priv, TGT, 0, PCI_CACHE_LINE_SIZE, 0xff);
grpci1_cfg_w8(priv, TGT, 0, PCI_LATENCY_TIMER, 0x40);
/* set as bus master, enable pci memory responses, clear status bits */
grpci1_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
}
static irqreturn_t grpci1_jump_interrupt(int irq, void *arg)
{
struct grpci1_priv *priv = arg;
dev_err(priv->dev, "Jump IRQ happened\n");
return IRQ_NONE;
}
/* Handle GRPCI1 Error Interrupt */
static irqreturn_t grpci1_err_interrupt(int irq, void *arg)
{
struct grpci1_priv *priv = arg;
u32 status;
grpci1_cfg_r16(priv, TGT, 0, PCI_STATUS, &status);
status &= priv->pci_err_mask;
if (status == 0)
return IRQ_NONE;
if (status & PCI_STATUS_PARITY)
dev_err(priv->dev, "Data Parity Error\n");
if (status & PCI_STATUS_SIG_TARGET_ABORT)
dev_err(priv->dev, "Signalled Target Abort\n");
if (status & PCI_STATUS_REC_TARGET_ABORT)
dev_err(priv->dev, "Received Target Abort\n");
if (status & PCI_STATUS_REC_MASTER_ABORT)
dev_err(priv->dev, "Received Master Abort\n");
if (status & PCI_STATUS_SIG_SYSTEM_ERROR)
dev_err(priv->dev, "Signalled System Error\n");
if (status & PCI_STATUS_DETECTED_PARITY)
dev_err(priv->dev, "Parity Error\n");
/* Clear handled INT TYPE IRQs */
grpci1_cfg_w16(priv, TGT, 0, PCI_STATUS, status);
return IRQ_HANDLED;
}
static int grpci1_of_probe(struct platform_device *ofdev)
{
struct grpci1_regs *regs;
struct grpci1_priv *priv;
int err, len;
const int *tmp;
u32 cfg, size, err_mask;
struct resource *res;
if (grpci1priv) {
dev_err(&ofdev->dev, "only one GRPCI1 supported\n");
return -ENODEV;
}
if (ofdev->num_resources < 3) {
dev_err(&ofdev->dev, "not enough APB/AHB resources\n");
return -EIO;
}
priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&ofdev->dev, "memory allocation failed\n");
return -ENOMEM;
}
platform_set_drvdata(ofdev, priv);
priv->dev = &ofdev->dev;
/* find device register base address */
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
regs = devm_request_and_ioremap(&ofdev->dev, res);
if (!regs) {
dev_err(&ofdev->dev, "io-regs mapping failed\n");
return -EADDRNOTAVAIL;
}
/*
* check that we're in Host Slot and that we can act as a Host Bridge
* and not only as target/peripheral.
*/
cfg = REGLOAD(regs->cfg_stat);
if ((cfg & CFGSTAT_HOST) == 0) {
dev_err(&ofdev->dev, "not in host system slot\n");
return -EIO;
}
/* check that BAR1 supports 256 MByte so that we can map kernel space */
REGSTORE(regs->page1, 0xffffffff);
size = ~REGLOAD(regs->page1) + 1;
if (size < 0x10000000) {
dev_err(&ofdev->dev, "BAR1 must be at least 256MByte\n");
return -EIO;
}
/* hardware must support little-endian PCI (byte-twisting) */
if ((REGLOAD(regs->page0) & PAGE0_BTEN) == 0) {
dev_err(&ofdev->dev, "byte-twisting is required\n");
return -EIO;
}
priv->regs = regs;
priv->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
dev_info(&ofdev->dev, "host found at 0x%p, irq%d\n", regs, priv->irq);
/* Find PCI Memory, I/O and Configuration Space Windows */
priv->pci_area = ofdev->resource[1].start;
priv->pci_area_end = ofdev->resource[1].end+1;
priv->pci_io = ofdev->resource[2].start;
priv->pci_conf = ofdev->resource[2].start + 0x10000;
priv->pci_conf_end = priv->pci_conf + 0x10000;
priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
if (!priv->pci_io_va) {
dev_err(&ofdev->dev, "unable to map PCI I/O area\n");
return -EIO;
}
printk(KERN_INFO
"GRPCI1: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
" I/O SPACE [0x%08lx - 0x%08lx]\n"
" CONFIG SPACE [0x%08lx - 0x%08lx]\n",
priv->pci_area, priv->pci_area_end-1,
priv->pci_io, priv->pci_conf-1,
priv->pci_conf, priv->pci_conf_end-1);
/*
* I/O Space resources in I/O Window mapped into Virtual Adr Space
* We never use the low 4KB because some devices seem to have problems using
* address 0.
*/
priv->info.io_space.name = "GRPCI1 PCI I/O Space";
priv->info.io_space.start = priv->pci_io_va + 0x1000;
priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
priv->info.io_space.flags = IORESOURCE_IO;
/*
* grpci1 has no prefetchable memory, map everything as
* non-prefetchable memory
*/
priv->info.mem_space.name = "GRPCI1 PCI MEM Space";
priv->info.mem_space.start = priv->pci_area;
priv->info.mem_space.end = priv->pci_area_end - 1;
priv->info.mem_space.flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) {
dev_err(&ofdev->dev, "unable to request PCI memory area\n");
err = -ENOMEM;
goto err1;
}
if (request_resource(&ioport_resource, &priv->info.io_space) < 0) {
dev_err(&ofdev->dev, "unable to request PCI I/O area\n");
err = -ENOMEM;
goto err2;
}
/* setup maximum supported PCI buses */
priv->info.busn.name = "GRPCI1 busn";
priv->info.busn.start = 0;
priv->info.busn.end = 15;
grpci1priv = priv;
/* Initialize hardware */
grpci1_hw_init(priv);
/*
* Get PCI Interrupt to System IRQ mapping and setup IRQ handling
* Error IRQ. All PCI and PCI-Error interrupts are shared using the
* same system IRQ.
*/
leon_update_virq_handling(priv->irq, grpci1_pci_flow_irq, "pcilvl", 0);
priv->irq_map[0] = grpci1_build_device_irq(1);
priv->irq_map[1] = grpci1_build_device_irq(2);
priv->irq_map[2] = grpci1_build_device_irq(3);
priv->irq_map[3] = grpci1_build_device_irq(4);
priv->irq_err = grpci1_build_device_irq(5);
printk(KERN_INFO " PCI INTA..D#: IRQ%d, IRQ%d, IRQ%d, IRQ%d\n",
priv->irq_map[0], priv->irq_map[1], priv->irq_map[2],
priv->irq_map[3]);
/* Enable IRQs on LEON IRQ controller */
err = devm_request_irq(&ofdev->dev, priv->irq, grpci1_jump_interrupt, 0,
"GRPCI1_JUMP", priv);
if (err) {
dev_err(&ofdev->dev, "ERR IRQ request failed: %d\n", err);
goto err3;
}
/* Setup IRQ handler for access errors */
err = devm_request_irq(&ofdev->dev, priv->irq_err,
grpci1_err_interrupt, IRQF_SHARED, "GRPCI1_ERR",
priv);
if (err) {
dev_err(&ofdev->dev, "ERR VIRQ request failed: %d\n", err);
goto err3;
}
tmp = of_get_property(ofdev->dev.of_node, "all_pci_errors", &len);
if (tmp && (len == 4)) {
priv->pci_err_mask = ALL_PCI_ERRORS;
err_mask = IRQ_ALL_ERRORS << IRQ_MASK_BIT;
} else {
priv->pci_err_mask = DEF_PCI_ERRORS;
err_mask = IRQ_DEF_ERRORS << IRQ_MASK_BIT;
}
/*
* Enable Error Interrupts. PCI interrupts are unmasked once request_irq
* is called by the PCI Device drivers
*/
REGSTORE(regs->irq, err_mask);
/* Init common layer and scan buses */
priv->info.ops = &grpci1_ops;
priv->info.map_irq = grpci1_map_irq;
leon_pci_init(ofdev, &priv->info);
return 0;
err3:
release_resource(&priv->info.io_space);
err2:
release_resource(&priv->info.mem_space);
err1:
iounmap((void *)priv->pci_io_va);
grpci1priv = NULL;
return err;
}
static struct of_device_id grpci1_of_match[] = {
{
.name = "GAISLER_PCIFBRG",
},
{
.name = "01_014",
},
{},
};
static struct platform_driver grpci1_of_driver = {
.driver = {
.name = "grpci1",
.owner = THIS_MODULE,
.of_match_table = grpci1_of_match,
},
.probe = grpci1_of_probe,
};
static int __init grpci1_init(void)
{
return platform_driver_register(&grpci1_of_driver);
}
subsys_initcall(grpci1_init);
......@@ -799,6 +799,11 @@ static int grpci2_of_probe(struct platform_device *ofdev)
if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
goto err4;
/* setup maximum supported PCI buses */
priv->info.busn.name = "GRPCI2 busn";
priv->info.busn.start = 0;
priv->info.busn.end = 255;
grpci2_hw_init(priv);
/*
......
......@@ -48,7 +48,7 @@ void pmc_leon_idle_fixup(void)
*/
register unsigned int address = (unsigned int)leon3_irqctrl_regs;
__asm__ __volatile__ (
"mov %%g0, %%asr19\n"
"wr %%g0, %%asr19\n"
"lda [%0] %1, %%g0\n"
:
: "r"(address), "i"(ASI_LEON_BYPASS));
......@@ -61,7 +61,7 @@ void pmc_leon_idle_fixup(void)
void pmc_leon_idle(void)
{
/* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("mov %g0, %asr19\n\t");
__asm__ __volatile__ ("wr %g0, %asr19\n\t");
}
/* Install LEON Power Down function */
......
......@@ -342,6 +342,7 @@ static void vio_remove(struct mdesc_handle *hp, u64 node)
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
}
}
......
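For context, not part of the diff: device_find_child() returns its match with an elevated reference count, which the caller must drop with put_device() once the device has been dealt with, which is the omission the hunk above fixes. A minimal, hypothetical sketch of the general pattern (example_remove_child and match are illustrative names):

#include <linux/device.h>

/* Unregister a matching child and drop the reference taken by
 * device_find_child().
 */
static void example_remove_child(struct device *parent, void *data,
				 int (*match)(struct device *dev, void *data))
{
	struct device *dev = device_find_child(parent, data, match);

	if (dev) {
		device_unregister(dev);
		put_device(dev);	/* balance device_find_child() */
	}
}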
......@@ -681,10 +681,9 @@ void get_new_mmu_context(struct mm_struct *mm)
{
unsigned long ctx, new_ctx;
unsigned long orig_pgsz_bits;
unsigned long flags;
int new_version;
spin_lock_irqsave(&ctx_alloc_lock, flags);
spin_lock(&ctx_alloc_lock);
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
......@@ -720,7 +719,7 @@ void get_new_mmu_context(struct mm_struct *mm)
out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
spin_unlock_irqrestore(&ctx_alloc_lock, flags);
spin_unlock(&ctx_alloc_lock);
if (unlikely(new_version))
smp_new_mmu_context_version();
......@@ -2125,7 +2124,6 @@ void free_initmem(void)
ClearPageReserved(p);
init_page_count(p);
__free_page(p);
num_physpages++;
totalram_pages++;
}
}
......@@ -2142,7 +2140,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
ClearPageReserved(p);
init_page_count(p);
__free_page(p);
num_physpages++;
totalram_pages++;
}
}
......
# Makefile for Sparc-specific hibernate files.
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
/*
* hibernate.c: Hibernation support specific to sparc64.
*
* Copyright (C) 2013 Kirill V Tkhai (tkhai@yandex.ru)
*/
#include <linux/mm.h>
#include <asm/hibernate.h>
#include <asm/visasm.h>
#include <asm/page.h>
#include <asm/tlb.h>
/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;
struct saved_context saved_context;
/*
* pfn_is_nosave - check if given pfn is in the 'nosave' section
*/
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN((unsigned long)&__nosave_begin);
unsigned long nosave_end_pfn = PFN_DOWN((unsigned long)&__nosave_end);
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
void save_processor_state(void)
{
save_and_clear_fpu();
}
void restore_processor_state(void)
{
struct mm_struct *mm = current->active_mm;
load_secondary_context(mm);
tsb_context_switch(mm);
}
/*
* hibernate_asm.S: Hibernation support specific to sparc64.
*
* Copyright (C) 2013 Kirill V Tkhai (tkhai@yandex.ru)
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/cpudata.h>
#include <asm/page.h>
ENTRY(swsusp_arch_suspend)
save %sp, -128, %sp
save %sp, -128, %sp
flushw
setuw saved_context, %g3
/* Save window regs */
rdpr %cwp, %g2
stx %g2, [%g3 + SC_REG_CWP]
rdpr %wstate, %g2
stx %g2, [%g3 + SC_REG_WSTATE]
stx %fp, [%g3 + SC_REG_FP]
/* Save state regs */
rdpr %tick, %g2
stx %g2, [%g3 + SC_REG_TICK]
rdpr %pstate, %g2
stx %g2, [%g3 + SC_REG_PSTATE]
/* Save global regs */
stx %g4, [%g3 + SC_REG_G4]
stx %g5, [%g3 + SC_REG_G5]
stx %g6, [%g3 + SC_REG_G6]
call swsusp_save
nop
mov %o0, %i0
restore
mov %o0, %i0
ret
restore
ENTRY(swsusp_arch_resume)
/* Write restore_pblist to %l0 */
sethi %hi(restore_pblist), %l0
ldx [%l0 + %lo(restore_pblist)], %l0
call __flush_tlb_all
nop
/* Write PAGE_OFFSET to %g7 */
sethi %uhi(PAGE_OFFSET), %g7
sllx %g7, 32, %g7
setuw (PAGE_SIZE-8), %g3
/* Use MMU Bypass */
rd %asi, %g1
wr %g0, ASI_PHYS_USE_EC, %asi
ba fill_itlb
nop
pbe_loop:
cmp %l0, %g0
be restore_ctx
sub %l0, %g7, %l0
ldxa [%l0 ] %asi, %l1 /* address */
ldxa [%l0 + 8] %asi, %l2 /* orig_address */
/* phys addr */
sub %l1, %g7, %l1
sub %l2, %g7, %l2
mov %g3, %l3 /* PAGE_SIZE-8 */
copy_loop:
ldxa [%l1 + %l3] ASI_PHYS_USE_EC, %g2
stxa %g2, [%l2 + %l3] ASI_PHYS_USE_EC
cmp %l3, %g0
bne copy_loop
sub %l3, 8, %l3
/* next pbe */
ba pbe_loop
ldxa [%l0 + 16] %asi, %l0
restore_ctx:
setuw saved_context, %g3
/* Restore window regs */
wrpr %g0, 0, %canrestore
wrpr %g0, 0, %otherwin
wrpr %g0, 6, %cansave
wrpr %g0, 0, %cleanwin
ldxa [%g3 + SC_REG_CWP] %asi, %g2
wrpr %g2, %cwp
ldxa [%g3 + SC_REG_WSTATE] %asi, %g2
wrpr %g2, %wstate
ldxa [%g3 + SC_REG_FP] %asi, %fp
/* Restore state regs */
ldxa [%g3 + SC_REG_PSTATE] %asi, %g2
wrpr %g2, %pstate
ldxa [%g3 + SC_REG_TICK] %asi, %g2
wrpr %g2, %tick
/* Restore global regs */
ldxa [%g3 + SC_REG_G4] %asi, %g4
ldxa [%g3 + SC_REG_G5] %asi, %g5
ldxa [%g3 + SC_REG_G6] %asi, %g6
wr %g1, %g0, %asi
restore
restore
wrpr %g0, 14, %pil
retl
mov %g0, %o0
fill_itlb:
ba pbe_loop
wrpr %g0, 15, %pil
......@@ -1592,6 +1592,7 @@ static int __init sunsu_init(void)
static void __exit sunsu_exit(void)
{
platform_driver_unregister(&su_driver);
if (sunsu_reg.nr)
sunserial_unregister_minors(&sunsu_reg, sunsu_reg.nr);
}
......