Commit b94ce25b authored by Ivan Kokshaysky's avatar Ivan Kokshaysky Committed by Richard Henderson

[PATCH] alpha: lynx support

Forward port of Jay's 2.4 patch.
Also I've cleaned up EISA configury - we only need it for
systems with EISA.

Ivan.
parent d33a9219
......@@ -59,6 +59,7 @@ choice
Jensen DECpc 150, DEC 2000 model 300,
DEC 2000 model 500
LX164 AlphaPC164-LX
Lynx AS 2100A
Miata Personal Workstation 433a, 433au, 500a,
500au, 600a, or 600au
Marvel AlphaServer ES47 / ES80 / GS1280
......@@ -169,6 +170,11 @@ config ALPHA_LX164
A technical overview of this board is available at
<http://www.unix-ag.org/Linux-Alpha/Architectures/LX164.html>.
config ALPHA_LYNX
bool "Lynx"
help
AlphaServer 2100A-based systems.
config ALPHA_MARVEL
bool "Marvel"
help
......@@ -263,22 +269,6 @@ config ISA
(MCA) or VESA. ISA is an older system, now being displaced by PCI;
newer boards don't support it. If you have ISA, say Y, otherwise N.
config EISA
bool
default y
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
The EISA bus provided some of the features of the IBM MicroChannel
bus while maintaining backward compatibility with cards made for
the older ISA bus. The EISA bus saw limited use between 1988 and
1995 when it was made obsolete by the PCI bus.
Say Y here if you are building a kernel for an EISA-based machine.
Otherwise, say N.
config SBUS
bool
......@@ -325,8 +315,8 @@ config ALPHA_NONAME
config ALPHA_EV4
bool
depends on ALPHA_JENSEN || ALPHA_SABLE && !ALPHA_GAMMA || ALPHA_NORITAKE && !ALPHA_PRIMO || ALPHA_MIKASA && !ALPHA_PRIMO || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL || ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
default y
depends on ALPHA_JENSEN || (ALPHA_SABLE && !ALPHA_GAMMA) || ALPHA_LYNX || ALPHA_NORITAKE && !ALPHA_PRIMO || ALPHA_MIKASA && !ALPHA_PRIMO || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL || ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
default y if !ALPHA_LYNX
config ALPHA_LCA
bool
......@@ -351,9 +341,12 @@ config ALPHA_EB64P
Runs from standard PC power supply.
config ALPHA_EV5
bool "EV5 CPU(s) (model 5/xxx)?" if ALPHA_LYNX
default y if ALPHA_RX164 || ALPHA_RAWHIDE || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_SABLE && ALPHA_GAMMA || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
config ALPHA_EV4
bool
depends on ALPHA_RX164 || ALPHA_RAWHIDE || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_SABLE && ALPHA_GAMMA || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
default y
default y if ALPHA_LYNX && !ALPHA_EV5
config ALPHA_CIA
bool
......@@ -384,9 +377,14 @@ config ALPHA_GAMMA
help
Say Y if you have an AS 2000 5/xxx or an AS 2100 5/xxx.
config ALPHA_GAMMA
bool
depends on ALPHA_LYNX
default y
config ALPHA_T2
bool
depends on ALPHA_SABLE
depends on ALPHA_SABLE || ALPHA_LYNX
default y
config ALPHA_PYXIS
......@@ -431,9 +429,23 @@ config ALPHA_IRONGATE
depends on ALPHA_NAUTILUS
default y
config ALPHA_AVANTI
bool
depends on ALPHA_XL || ALPHA_AVANTI_CH
default y
help
Avanti AS 200, AS 205, AS 250, AS 255, AS 300, and AS 400-based
Alphas. Info at
<http://www.unix-ag.org/Linux-Alpha/Architectures/Avanti.html>.
config ALPHA_BROKEN_IRQ_MASK
bool
depends on ALPHA_GENERIC || ALPHA_PC164
default y
config ALPHA_SRM
bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
default y if ALPHA_JENSEN || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
default y if ALPHA_JENSEN || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
---help---
There are two different types of booting firmware on Alphas: SRM,
which is command line driven, and ARC, which uses menus and arrow
......@@ -459,28 +471,26 @@ config EARLY_PRINTK
depends on ALPHA_GENERIC || ALPHA_SRM
default y
config ALPHA_EISA
config EISA
bool
depends on ALPHA_ALCOR || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_NORITAKE || ALPHA_RAWHIDE
depends on ALPHA_GENERIC || ALPHA_JENSEN || ALPHA_ALCOR || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_RAWHIDE
default y
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
config ALPHA_AVANTI
bool
depends on ALPHA_XL || ALPHA_AVANTI_CH
default y
help
Avanti AS 200, AS 205, AS 250, AS 255, AS 300, and AS 400-based
Alphas. Info at
<http://www.unix-ag.org/Linux-Alpha/Architectures/Avanti.html>.
The EISA bus provided some of the features of the IBM MicroChannel
bus while maintaining backward compatibility with cards made for
the older ISA bus. The EISA bus saw limited use between 1988 and
1995 when it was made obsolete by the PCI bus.
config ALPHA_BROKEN_IRQ_MASK
bool
depends on ALPHA_GENERIC || ALPHA_PC164
default y
Say Y here if you are building a kernel for an EISA-based machine.
Otherwise, say N.
config SMP
bool "Symmetric multi-processing support"
depends on ALPHA_SABLE || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
......
......@@ -83,6 +83,7 @@ obj-$(CONFIG_ALPHA_RAWHIDE) += sys_rawhide.o irq_i8259.o
obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o
obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o
obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o
obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o
obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
......
......@@ -17,6 +17,7 @@
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/delay.h>
#define __EXTERN_INLINE
#include <asm/io.h>
......@@ -26,6 +27,12 @@
#include "proto.h"
#include "pci_impl.h"
/* For dumping initial DMA window settings. */
#define DEBUG_PRINT_INITIAL_SETTINGS 0
/* For dumping final DMA window settings. */
#define DEBUG_PRINT_FINAL_SETTINGS 0
/*
* By default, we direct-map starting at 2GB, in order to allow the
* maximum size direct-map window (2GB) to match the maximum amount of
......@@ -34,11 +41,23 @@
* ISA DMA, since the maximum ISA DMA address is 2GB-1.
*
* For now, this seems a reasonable trade-off: even though most SABLEs
* have far less than even 1GB of memory, floppy usage/performance will
* not really be affected by forcing it to go via scatter/gather...
* have less than 1GB of memory, floppy usage/performance will not
* really be affected by forcing it to go via scatter/gather...
*/
#define T2_DIRECTMAP_2G 1
#if T2_DIRECTMAP_2G
# define T2_DIRECTMAP_START 0x80000000UL
# define T2_DIRECTMAP_LENGTH 0x80000000UL
#else
# define T2_DIRECTMAP_START 0x40000000UL
# define T2_DIRECTMAP_LENGTH 0x40000000UL
#endif
/* The ISA scatter/gather window settings. */
#define T2_ISA_SG_START 0x00800000UL
#define T2_ISA_SG_LENGTH 0x00800000UL
/*
* NOTE: Herein lie back-to-back mb instructions. They are magic.
* One plausible explanation is that the i/o controller does not properly
......@@ -57,6 +76,24 @@
# define DBG(args)
#endif
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
/* Place to save the DMA Window registers as set up by SRM
for restoration during shutdown. */
static struct
{
struct {
unsigned long wbase;
unsigned long wmask;
unsigned long tbase;
} window[2];
unsigned long hae_1;
unsigned long hae_2;
unsigned long hae_3;
unsigned long hae_4;
unsigned long hbase;
} t2_saved_config __attribute((common));
/*
* Given a bus, device, and function number, compute resulting
......@@ -134,42 +171,34 @@ mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
return 0;
}
/*
* NOTE: both conf_read() and conf_write() may set HAE_3 when needing
* to do type1 access. This is protected by the use of spinlock IRQ
* primitives in the wrapper functions pci_{read,write}_config_*()
* defined in drivers/pci/pci.c.
*/
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
unsigned long flags;
unsigned int value, cpu;
unsigned int value, cpu, taken;
unsigned long t2_cfg = 0;
cpu = smp_processor_id();
local_irq_save(flags); /* avoid getting hit by machine check */
DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
#if 0
{
unsigned long stat0;
/* Reset status register to avoid losing errors. */
stat0 = *(vulp)T2_IOCSR;
*(vulp)T2_IOCSR = stat0;
mb();
DBG(("conf_read: T2 IOCSR was 0x%x\n", stat0));
}
#endif
/* If Type1 access, must set T2 CFG. */
if (type1) {
t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
*(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
mb();
DBG(("conf_read: TYPE1 access\n"));
}
mb();
draina();
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
t2_mcheck_any_expected |= (1 << cpu);
mb();
/* Access configuration space. */
......@@ -177,12 +206,20 @@ conf_read(unsigned long addr, unsigned char type1)
mb();
mb(); /* magic */
if (mcheck_taken(cpu)) {
/* Wait for possible mcheck. Also, this lets other CPUs clear
their mchecks as well, as they can reliably tell when
another CPU is in the midst of handling a real mcheck via
the "taken" function. */
udelay(100);
if ((taken = mcheck_taken(cpu))) {
mcheck_taken(cpu) = 0;
t2_mcheck_last_taken |= (1 << cpu);
value = 0xffffffffU;
mb();
}
mcheck_expected(cpu) = 0;
t2_mcheck_any_expected = 0;
mb();
/* If Type1 access, must reset T2 CFG so normal IO space ops work. */
......@@ -190,45 +227,30 @@ conf_read(unsigned long addr, unsigned char type1)
*(vulp)T2_HAE_3 = t2_cfg;
mb();
}
DBG(("conf_read(): finished\n"));
local_irq_restore(flags);
return value;
}
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
unsigned long flags;
unsigned int cpu;
unsigned int cpu, taken;
unsigned long t2_cfg = 0;
cpu = smp_processor_id();
local_irq_save(flags); /* avoid getting hit by machine check */
#if 0
{
unsigned long stat0;
/* Reset status register to avoid losing errors. */
stat0 = *(vulp)T2_IOCSR;
*(vulp)T2_IOCSR = stat0;
mb();
DBG(("conf_write: T2 ERR was 0x%x\n", stat0));
}
#endif
/* If Type1 access, must set T2 CFG. */
if (type1) {
t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
*(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
mb();
DBG(("conf_write: TYPE1 access\n"));
}
mb();
draina();
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
t2_mcheck_any_expected |= (1 << cpu);
mb();
/* Access configuration space. */
......@@ -236,7 +258,19 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1)
mb();
mb(); /* magic */
/* Wait for possible mcheck. Also, this lets other CPUs clear
their mchecks as well, as they can reliably tell when
this CPU is in the midst of handling a real mcheck via
the "taken" function. */
udelay(100);
if ((taken = mcheck_taken(cpu))) {
mcheck_taken(cpu) = 0;
t2_mcheck_last_taken |= (1 << cpu);
mb();
}
mcheck_expected(cpu) = 0;
t2_mcheck_any_expected = 0;
mb();
/* If Type1 access, must reset T2 CFG so normal IO space ops work. */
......@@ -244,8 +278,6 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1)
*(vulp)T2_HAE_3 = t2_cfg;
mb();
}
DBG(("conf_write(): finished\n"));
local_irq_restore(flags);
}
static int
......@@ -290,48 +322,121 @@ struct pci_ops t2_pci_ops =
.write = t2_write_config,
};
/*
 * Program T2 DMA window 1 as a direct-mapped PCI->memory window.
 * 'base'/'length' give the window placement in PCI bus address space;
 * the window translates addresses 1:1 (TBASE1 = 0).  Also records the
 * window in __direct_map_base/__direct_map_size for the DMA mapping
 * layer.  Called only at init time (__init).
 */
static void __init
t2_direct_map_window1(unsigned long base, unsigned long length)
{
unsigned long temp;
/* Publish the direct-map window parameters for the common DMA code. */
__direct_map_base = base;
__direct_map_size = length;
/* WBASE1: window base in the top bits, window end (in MB) in the low
   bits — assumes base/length are multiples of 1MB (0xfff00000 mask). */
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
*(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
temp = (length - 1) & 0xfff00000UL;
*(vulp)T2_WMASK1 = temp;
/* Direct map: no scatter/gather table, translation base is 0. */
*(vulp)T2_TBASE1 = 0;
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
__FUNCTION__,
*(vulp)T2_WBASE1,
*(vulp)T2_WMASK1,
*(vulp)T2_TBASE1);
#endif
}
/*
 * Program T2 DMA window 2 as a scatter/gather window for ISA DMA.
 * Allocates an IOMMU arena for the window and points TBASE2 at its
 * page-table entries.  Called only at init time (__init).
 */
static void __init
t2_sg_map_window2(struct pci_controller *hose,
unsigned long base,
unsigned long length)
{
unsigned long temp;
/* Note we can only do 1 SG window, as the other is for direct, so
do an ISA SG area, especially for the floppy. */
hose->sg_isa = iommu_arena_new(hose, base, length, 0);
hose->sg_pci = NULL;
/* Same WBASE encoding as window 1: base in top bits, end-in-MB low. */
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
*(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
temp = (length - 1) & 0xfff00000UL;
*(vulp)T2_WMASK2 = temp;
/* TBASE2 holds the physical address of the SG PTEs, shifted by 1 —
   presumably the register's required encoding; confirm against the
   T2 chipset spec. */
*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
mb();
t2_pci_tbi(hose, 0, -1); /* flush TLB all */
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
__FUNCTION__,
*(vulp)T2_WBASE2,
*(vulp)T2_WMASK2,
*(vulp)T2_TBASE2);
#endif
}
/*
 * Snapshot the SRM-programmed T2 DMA window and HAE registers into
 * t2_saved_config so t2_kill_arch() can restore them at shutdown,
 * returning the hardware to the state the firmware expects.
 */
static void __init
t2_save_configuration(void)
{
#if DEBUG_PRINT_INITIAL_SETTINGS
printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */
printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2);
printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3);
printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4);
printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE);
printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__,
*(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__,
*(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
/*
 * Save the DMA Window registers.
 */
t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1;
t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1;
t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1;
t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2;
t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2;
t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2;
/* HAE_1 is taken from the software copy (srm_hae), not the register,
   since the hardware register has already been zeroed. */
t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */
t2_saved_config.hae_2 = *(vulp)T2_HAE_2;
t2_saved_config.hae_3 = *(vulp)T2_HAE_3;
t2_saved_config.hae_4 = *(vulp)T2_HAE_4;
t2_saved_config.hbase = *(vulp)T2_HBASE;
}
void __init
t2_init_arch(void)
{
struct pci_controller *hose;
unsigned long t2_iocsr;
unsigned long temp;
unsigned int i;
for (i = 0; i < NR_CPUS; i++) {
mcheck_expected(i) = 0;
mcheck_taken(i) = 0;
}
#if 0
/* Set up error reporting. */
t2_iocsr = *(vulp)T2_IOCSR;
*(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 7); /* TLB error check */
mb();
*(vulp)T2_IOCSR; /* read it back to make sure */
#endif
t2_mcheck_any_expected = 0;
t2_mcheck_last_taken = 0;
/* Enable scatter/gather TLB use. */
t2_iocsr = *(vulp)T2_IOCSR;
if (!(t2_iocsr & (0x1UL << 26))) {
temp = *(vulp)T2_IOCSR;
if (!(temp & (0x1UL << 26))) {
printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n",
t2_iocsr);
*(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 26);
temp);
*(vulp)T2_IOCSR = temp | (0x1UL << 26);
mb();
*(vulp)T2_IOCSR; /* read it back to make sure */
}
#if 0
printk("t2_init_arch: HBASE was 0x%lx\n", *(vulp)T2_HBASE);
printk("t2_init_arch: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
*(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
printk("t2_init_arch: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
*(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
t2_save_configuration();
/*
* Create our single hose.
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hose->mem_space = &iomem_resource;
......@@ -342,52 +447,51 @@ t2_init_arch(void)
hose->sparse_io_base = T2_IO - IDENT_ADDR;
hose->dense_io_base = 0;
/* Note we can only do 1 SG window, as the other is for direct, so
do an ISA SG area, especially for the floppy. */
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
hose->sg_pci = NULL;
/*
* Set up the PCI->physical memory translation windows.
*
* Window 1 goes at ? GB and is ?GB large, direct mapped.
* Window 2 goes at 8 MB and is 8MB large, scatter/gather (for ISA).
* Window 1 is direct mapped.
* Window 2 is scatter/gather (for ISA).
*/
#if T2_DIRECTMAP_2G
__direct_map_base = 0x80000000UL;
__direct_map_size = 0x80000000UL;
t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH);
/* WARNING!! must correspond to the direct map window params!!! */
*(vulp)T2_WBASE1 = 0x80080fffU;
*(vulp)T2_WMASK1 = 0x7ff00000U;
*(vulp)T2_TBASE1 = 0;
#else /* T2_DIRECTMAP_2G */
__direct_map_base = 0x40000000UL;
__direct_map_size = 0x40000000UL;
/* Always make an ISA DMA window. */
t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH);
/* WARNING!! must correspond to the direct map window params!!! */
*(vulp)T2_WBASE1 = 0x400807ffU;
*(vulp)T2_WMASK1 = 0x3ff00000U;
*(vulp)T2_TBASE1 = 0;
#endif /* T2_DIRECTMAP_2G */
/* WARNING!! must correspond to the SG arena/window params!!! */
*(vulp)T2_WBASE2 = 0x008c000fU;
*(vulp)T2_WMASK2 = 0x00700000U;
*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
*(vulp)T2_HBASE = 0x0;
*(vulp)T2_HBASE = 0x0; /* Disable HOLES. */
/* Zero HAE. */
*(vulp)T2_HAE_1 = 0; mb();
*(vulp)T2_HAE_2 = 0; mb();
*(vulp)T2_HAE_3 = 0; mb();
*(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */
*(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */
*(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */
#if 0
*(vulp)T2_HAE_4 = 0; mb(); /* DO NOT TOUCH THIS!!! */
/* !!! DO NOT EVER TOUCH THIS !!! */
*(vulp)T2_HAE_4 = 0; mb(); /* Dense MEM HAE */
#endif
}
t2_pci_tbi(hose, 0, -1); /* flush TLB all */
/*
 * Shutdown hook: put the T2 back the way SRM left it, restoring the
 * DMA window and HAE/HBASE registers saved by t2_save_configuration().
 * 'mode' is currently unused here.
 */
void
t2_kill_arch(int mode)
{
/*
 * Restore the DMA Window registers.
 */
*(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase;
*(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask;
*(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase;
*(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase;
*(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask;
*(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase;
mb();
/* HAE_1 comes from the live software copy (same value that was
   captured into t2_saved_config.hae_1). */
*(vulp)T2_HAE_1 = srm_hae;
*(vulp)T2_HAE_2 = t2_saved_config.hae_2;
*(vulp)T2_HAE_3 = t2_saved_config.hae_3;
*(vulp)T2_HAE_4 = t2_saved_config.hae_4;
*(vulp)T2_HBASE = t2_saved_config.hbase;
mb();
*(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */
}
void
......@@ -415,13 +519,7 @@ t2_clear_errors(int cpu)
{
struct sable_cpu_csr *cpu_regs;
cpu_regs = (struct sable_cpu_csr *)T2_CPU0_BASE;
if (cpu == 1)
cpu_regs = (struct sable_cpu_csr *)T2_CPU1_BASE;
if (cpu == 2)
cpu_regs = (struct sable_cpu_csr *)T2_CPU2_BASE;
if (cpu == 3)
cpu_regs = (struct sable_cpu_csr *)T2_CPU3_BASE;
cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);
cpu_regs->sic &= ~SIC_SEIC;
......@@ -438,19 +536,76 @@ t2_clear_errors(int cpu)
mb(); /* magic */
}
/*
* SABLE seems to have a "broadcast" style machine check, in that all
* CPUs receive it. And, the issuing CPU, in the case of PCI Config
* space read/write faults, will also receive a second mcheck, upon
* lowering IPL during completion processing in pci_read_config_byte()
* et al.
*
* Hence all the taken/expected/any_expected/last_taken stuff...
*/
/*
 * Machine-check handler for T2-based systems (Sable/Lynx).  Clears the
 * error state first, then classifies the mcheck using the
 * expected/any_expected/last_taken bookkeeping (see comment above):
 * mchecks here are effectively broadcast, so a CPU may see one that
 * another CPU provoked, or a delayed second one from its own PCI
 * config-space probe.
 */
void
t2_machine_check(unsigned long vector, unsigned long la_ptr,
struct pt_regs * regs)
{
int cpu = smp_processor_id();
#if DEBUG_MCHECK > 0
struct el_common *mchk_header = (struct el_common *)la_ptr;
#endif /* DEBUG_MCHECK */
/* Clear the error before any reporting. */
mb();
mb(); /* magic */
draina();
t2_clear_errors(cpu);
/* NOTE(review): the next line appears to be leftover pre-patch code
   (superseded by the wrmces(0x7) below) — confirm against the final
   tree before relying on it. */
wrmces(rdmces()|1); /* ??? */
/* This should not actually be done until the logout frame is
examined, but, since we don't do that, go on and do this... */
wrmces(0x7);
mb();
/* Now, do testing for the anomalous conditions. */
if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
/*
 * FUNKY: Received mcheck on a CPU and not
 * expecting it, but another CPU is expecting one.
 *
 * Just dismiss it for now on this CPU...
 */
#if DEBUG_MCHECK > 0
printk("t2_machine_check(cpu%d): any_expected 0x%x -"
" (assumed) spurious -"
" code 0x%x\n", cpu, t2_mcheck_any_expected,
(unsigned int)mchk_header->code);
#endif /* DEBUG_MCHECK */
return;
}
/* Nobody is expecting an mcheck: either a late echo of one we already
   took (dismiss it), or a genuinely unexpected one (fall through and
   report). */
if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
if (t2_mcheck_last_taken & (1 << cpu)) {
#if DEBUG_MCHECK > 0
printk("t2_machine_check(cpu%d): last_taken 0x%x - "
"unexpected mcheck - code 0x%x\n",
cpu, t2_mcheck_last_taken,
(unsigned int)mchk_header->code);
#endif /* DEBUG_MCHECK */
t2_mcheck_last_taken = 0;
mb();
return;
} else {
t2_mcheck_last_taken = 0;
mb();
}
}
#if DEBUG_MCHECK > 0
printk("%s t2_mcheck(cpu%d): last_taken 0x%x - "
"any_expected 0x%x - code 0x%x\n",
(mcheck_expected(cpu) ? "EX" : "UN"), cpu,
t2_mcheck_last_taken, t2_mcheck_any_expected,
(unsigned int)mchk_header->code);
#endif /* DEBUG_MCHECK */
/* Hand off to the common reporting path; "expected" mchecks (from PCI
   config probes) are treated as benign there. */
process_mcheck_info(vector, la_ptr, regs, "T2", mcheck_expected(cpu));
}
......@@ -76,6 +76,7 @@ extern void polaris_machine_check(u64, u64, struct pt_regs *);
/* core_t2.c */
extern struct pci_ops t2_pci_ops;
extern void t2_init_arch(void);
extern void t2_kill_arch(int);
extern void t2_machine_check(u64, u64, struct pt_regs *);
extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
......
......@@ -162,6 +162,7 @@ WEAK(eb66p_mv);
WEAK(eiger_mv);
WEAK(jensen_mv);
WEAK(lx164_mv);
WEAK(lynx_mv);
WEAK(marvel_ev7_mv);
WEAK(miata_mv);
WEAK(mikasa_mv);
......@@ -739,7 +740,7 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
NULL, /* Turbolaser */
&avanti_mv,
NULL, /* Mustang */
&alcor_mv, /* Alcor, Bret, Maverick. */
NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
NULL, /* Tradewind */
NULL, /* Mikasa -- see below. */
NULL, /* EB64 */
......@@ -748,7 +749,7 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
&alphabook1_mv,
&rawhide_mv,
NULL, /* K2 */
NULL, /* Lynx */
&lynx_mv, /* Lynx */
&xl_mv,
NULL, /* EB164 -- see variation. */
NULL, /* Noritake -- see below. */
......@@ -930,6 +931,7 @@ get_sysvec_byname(const char *name)
&eiger_mv,
&jensen_mv,
&lx164_mv,
&lynx_mv,
&miata_mv,
&mikasa_mv,
&mikasa_primo_mv,
......
......@@ -5,7 +5,7 @@
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the Sable and Sable-Gamma systems.
* Code supporting the Sable, Sable-Gamma, and Lynx systems.
*/
#include <linux/config.h>
......@@ -31,8 +31,28 @@
#include "pci_impl.h"
#include "machvec_impl.h"
spinlock_t sable_irq_lock = SPIN_LOCK_UNLOCKED;
spinlock_t sable_lynx_irq_lock = SPIN_LOCK_UNLOCKED;
typedef struct irq_swizzle_struct
{
char irq_to_mask[64];
char mask_to_irq[64];
/* Note mask bit is true for DISABLED irqs. */
unsigned long shadow_mask;
void (*update_irq_hw)(unsigned long bit, unsigned long mask);
void (*ack_irq_hw)(unsigned long bit);
} irq_swizzle_t;
static irq_swizzle_t *sable_lynx_irq_swizzle;
static void sable_lynx_init_irq(int nr_irqs);
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
/***********************************************************************/
/*
* For SABLE, which is really baroque, we manage 40 IRQ's, but the
* hardware really only supports 24, not via normal ISA PIC,
......@@ -71,30 +91,7 @@ spinlock_t sable_irq_lock = SPIN_LOCK_UNLOCKED;
*23 IIC -
*/
static struct
{
char irq_to_mask[40];
char mask_to_irq[40];
/* Note mask bit is true for DISABLED irqs. */
unsigned long shadow_mask;
} sable_irq_swizzle = {
{
-1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
-1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */
2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */
},
{
34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */
3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
},
-1
};
static inline void
static void
sable_update_irq_hw(unsigned long bit, unsigned long mask)
{
int port = 0x537;
......@@ -110,7 +107,7 @@ sable_update_irq_hw(unsigned long bit, unsigned long mask)
outb(mask, port);
}
static inline void
static void
sable_ack_irq_hw(unsigned long bit)
{
int port, val1, val2;
......@@ -133,102 +130,46 @@ sable_ack_irq_hw(unsigned long bit)
outb(val2, 0x534); /* ack the master */
}
static inline void
sable_enable_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_irq_swizzle.irq_to_mask[irq];
spin_lock(&sable_irq_lock);
mask = sable_irq_swizzle.shadow_mask &= ~(1UL << bit);
sable_update_irq_hw(bit, mask);
spin_unlock(&sable_irq_lock);
}
static void
sable_disable_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_irq_swizzle.irq_to_mask[irq];
spin_lock(&sable_irq_lock);
mask = sable_irq_swizzle.shadow_mask |= 1UL << bit;
sable_update_irq_hw(bit, mask);
spin_unlock(&sable_irq_lock);
}
static unsigned int
sable_startup_irq(unsigned int irq)
{
sable_enable_irq(irq);
return 0;
}
static void
sable_end_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
sable_enable_irq(irq);
}
static void
sable_mask_and_ack_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_irq_swizzle.irq_to_mask[irq];
spin_lock(&sable_irq_lock);
mask = sable_irq_swizzle.shadow_mask |= 1UL << bit;
sable_update_irq_hw(bit, mask);
sable_ack_irq_hw(bit);
spin_unlock(&sable_irq_lock);
}
static struct hw_interrupt_type sable_irq_type = {
.typename = "SABLE",
.startup = sable_startup_irq,
.shutdown = sable_disable_irq,
.enable = sable_enable_irq,
.disable = sable_disable_irq,
.ack = sable_mask_and_ack_irq,
.end = sable_end_irq,
/*
 * SABLE IRQ swizzle tables: kernel IRQ <-> hardware mask-bit mapping
 * (see the big per-bit table comment above), plus the shadow disable
 * mask and the SABLE-specific hardware update/ack hooks.  -1 entries
 * mean "no mapping".  Tables are padded to 64 entries to match
 * irq_swizzle_t, though SABLE itself only uses the first 40/24.
 */
static irq_swizzle_t sable_irq_swizzle = {
	{
		-1,  6, -1,  8, 15, 12,  7,  9,	/* pseudo PIC  0-7  */
		-1, 16, 17, 18,  3, -1, 21, 22,	/* pseudo PIC  8-15 */
		-1, -1, -1, -1, -1, -1, -1, -1,	/* pseudo EISA 0-7  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/* pseudo EISA 8-15 */
		 2,  1,  0,  4,  5, -1, -1, -1,	/* pseudo PCI */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1	/*  */
	},
	{
		34, 33, 32, 12, 35, 36,  1,  6,	/* mask 0-7  */
		 3,  7, -1, -1,  5, -1, -1,  4,	/* mask 8-15 */
		 9, 10, 11, -1, -1, 14, 15, -1,	/* mask 16-23 */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1,	/*  */
		-1, -1, -1, -1, -1, -1, -1, -1	/*  */
	},
	-1,
	sable_update_irq_hw,
	sable_ack_irq_hw
};
/*
 * SRM device-interrupt entry point for SABLE.  The PALcode vector
 * encodes a hardware interrupt-mask bit; translate it through the
 * swizzle table to a normal kernel IRQ number and dispatch.
 */
static void
sable_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	/* Note that the vector reported by the SRM PALcode corresponds
	   to the interrupt mask bits, but we have to manage via more
	   normal IRQs. */
	int mask_bit = (vector - 0x800) >> 4;
	handle_irq(sable_irq_swizzle.mask_to_irq[mask_bit], regs);
}
static void __init
sable_init_irq(void)
{
long i;
outb(-1, 0x537); /* slave 0 */
outb(-1, 0x53b); /* slave 1 */
outb(-1, 0x53d); /* slave 2 */
outb(0x44, 0x535); /* enable cascades in master */
for (i = 0; i < 40; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
irq_desc[i].handler = &sable_irq_type;
}
common_init_isa_dma();
sable_lynx_irq_swizzle = &sable_irq_swizzle;
sable_lynx_init_irq(40);
}
/*
* PCI Fixup configuration for ALPHA SABLE (2100) - 2100A is different ??
* PCI Fixup configuration for ALPHA SABLE (2100).
*
* The device to slot mapping looks like:
*
......@@ -256,7 +197,7 @@ sable_init_irq(void)
static int __init
sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[9][5] __initdata = {
static char irq_tab[9][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
{ 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
......@@ -266,13 +207,349 @@ sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{ -1, -1, -1, -1, -1}, /* IdSel 5, none */
{ 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */
{ 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */
{ 32+4, 32+4, 32+4, 32+4, 32+4}, /* IdSel 8, slot 2 */
};
{ 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */
};
long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
/***********************************************************************/
/* LYNX hardware specifics
*/
/*
* For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC.
*
* Bit Meaning Kernel IRQ
*------------------------------------------
* 0
* 1
* 2
* 3 mouse 12
* 4
* 5
* 6 keyboard 1
* 7 floppy 6
* 8 COM2 3
* 9 parallel port 7
*10 EISA irq 3 -
*11 EISA irq 4 -
*12 EISA irq 5 5
*13 EISA irq 6 -
*14 EISA irq 7 -
*15 COM1 4
*16 EISA irq 9 9
*17 EISA irq 10 10
*18 EISA irq 11 11
*19 EISA irq 12 -
*20
*21 EISA irq 14 14
*22 EISA irq 15 15
*23 IIC -
*24 VGA (builtin) -
*25
*26
*27
*28 NCR810 (builtin) 28
*29
*30
*31
*32 PCI 0 slot 4 A primary bus 32
*33 PCI 0 slot 4 B primary bus 33
*34 PCI 0 slot 4 C primary bus 34
*35 PCI 0 slot 4 D primary bus
*36 PCI 0 slot 5 A primary bus
*37 PCI 0 slot 5 B primary bus
*38 PCI 0 slot 5 C primary bus
*39 PCI 0 slot 5 D primary bus
*40 PCI 0 slot 6 A primary bus
*41 PCI 0 slot 6 B primary bus
*42 PCI 0 slot 6 C primary bus
*43 PCI 0 slot 6 D primary bus
*44 PCI 0 slot 7 A primary bus
*45 PCI 0 slot 7 B primary bus
*46 PCI 0 slot 7 C primary bus
*47 PCI 0 slot 7 D primary bus
*48 PCI 0 slot 0 A secondary bus
*49 PCI 0 slot 0 B secondary bus
*50 PCI 0 slot 0 C secondary bus
*51 PCI 0 slot 0 D secondary bus
*52 PCI 0 slot 1 A secondary bus
*53 PCI 0 slot 1 B secondary bus
*54 PCI 0 slot 1 C secondary bus
*55 PCI 0 slot 1 D secondary bus
*56 PCI 0 slot 2 A secondary bus
*57 PCI 0 slot 2 B secondary bus
*58 PCI 0 slot 2 C secondary bus
*59 PCI 0 slot 2 D secondary bus
*60 PCI 0 slot 3 A secondary bus
*61 PCI 0 slot 3 B secondary bus
*62 PCI 0 slot 3 C secondary bus
*63 PCI 0 slot 3 D secondary bus
*/
/*
 * Push the current shadow disable mask out to the LYNX interrupt
 * controller.  'bit' is unused here (the whole 64-bit mask is written
 * at once); kept for signature compatibility with irq_swizzle_t's
 * update_irq_hw hook.
 */
static void
lynx_update_irq_hw(unsigned long bit, unsigned long mask)
{
	/*
	 * Write the AIR register on the T3/T4 with the
	 * address of the IC mask register (offset 0x40)
	 */
	*(vulp)T2_AIR = 0x40;
	mb();
	*(vulp)T2_AIR;	/* re-read to force write */
	mb();
	/* DIR now writes through to the selected IC register. */
	*(vulp)T2_DIR = mask;
	mb();
	mb();
}
/*
 * Acknowledge a LYNX interrupt by writing its mask-bit number to the
 * T2 VAR register.  Double mb() per the chipset's magic-barrier
 * convention used throughout this file.
 */
static void
lynx_ack_irq_hw(unsigned long bit)
{
	*(vulp)T2_VAR = (u_long) bit;
	mb();
	mb();
}
/*
 * LYNX IRQ swizzle tables: kernel IRQ <-> hardware mask-bit mapping
 * for the custom 64-bit interrupt controller (see the per-bit table
 * comment above).  -1 entries mean "no mapping"; bits 32-63 (PCI
 * slots) map 1:1.  Hooked up to the LYNX-specific update/ack routines.
 */
static irq_swizzle_t lynx_irq_swizzle = {
	{ /* irq_to_mask */
		-1,  6, -1,  8, 15, 12,  7,  9,	/* pseudo PIC  0-7  */
		-1, 16, 17, 18,  3, -1, 21, 22,	/* pseudo PIC  8-15 */
		-1, -1, -1, -1, -1, -1, -1, -1,	/* pseudo */
		-1, -1, -1, -1, 28, -1, -1, -1,	/* pseudo */
		32, 33, 34, 35, 36, 37, 38, 39,	/* mask 32-39 */
		40, 41, 42, 43, 44, 45, 46, 47,	/* mask 40-47 */
		48, 49, 50, 51, 52, 53, 54, 55,	/* mask 48-55 */
		56, 57, 58, 59, 60, 61, 62, 63	/* mask 56-63 */
	},
	{ /* mask_to_irq */
		-1, -1, -1, 12, -1, -1,  1,  6,	/* mask 0-7   */
		 3,  7, -1, -1,  5, -1, -1,  4,	/* mask 8-15  */
		 9, 10, 11, -1, -1, 14, 15, -1,	/* mask 16-23 */
		-1, -1, -1, -1, 28, -1, -1, -1,	/* mask 24-31 */
		32, 33, 34, 35, 36, 37, 38, 39,	/* mask 32-39 */
		40, 41, 42, 43, 44, 45, 46, 47,	/* mask 40-47 */
		48, 49, 50, 51, 52, 53, 54, 55,	/* mask 48-55 */
		56, 57, 58, 59, 60, 61, 62, 63	/* mask 56-63 */
	},
	-1,
	lynx_update_irq_hw,
	lynx_ack_irq_hw
};
/*
 * LYNX IRQ init: select the LYNX swizzle tables, then run the shared
 * sable/lynx IRQ setup for all 64 LYNX interrupt lines.
 */
static void __init
lynx_init_irq(void)
{
	sable_lynx_irq_swizzle = &lynx_irq_swizzle;
	sable_lynx_init_irq(64);
}
/*
* PCI Fixup configuration for ALPHA LYNX (2100A)
*
* The device to slot mapping looks like:
*
* Slot Device
* 0 none
* 1 none
* 2 PCI-EISA bridge
* 3 PCI-PCI bridge
* 4 NCR 810 (Demi-Lynx only)
* 5 none
* 6 PCI on board slot 4
* 7 PCI on board slot 5
* 8 PCI on board slot 6
* 9 PCI on board slot 7
*
* And behind the PPB we have:
*
* 11 PCI on board slot 0
* 12 PCI on board slot 1
* 13 PCI on board slot 2
* 14 PCI on board slot 3
*/
/*
* NOTE: the IRQ assignments below are arbitrary, but need to be consistent
* with the values in the irq swizzling tables above.
*/
/*
 * PCI interrupt routing for LYNX (AS 2100A): map (slot, pin) to a
 * kernel IRQ via the table below.  Rows run from IdSel 13 (slot 2,
 * the PCEB) through the slots behind the PPB; COMMON_TABLE_LOOKUP
 * indexes it using the min_idsel/max_idsel/irqs_per_slot locals.
 * Returns -1 for slots with no interrupt.
 */
static int __init
lynx_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[19][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 13,  PCEB   */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 14,  PPB    */
		{   28,    28,    28,    28,    28},	/* IdSel 15,  NCR demi */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 16,  none   */
		{   32,    32,    33,    34,    35},	/* IdSel 17,  slot 4 */
		{   36,    36,    37,    38,    39},	/* IdSel 18,  slot 5 */
		{   40,    40,    41,    42,    43},	/* IdSel 19,  slot 6 */
		{   44,    44,    45,    46,    47},	/* IdSel 20,  slot 7 */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 22,  none   */
		/* The following are actually behind the PPB. */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 16   none */
		{   28,    28,    28,    28,    28},	/* IdSel 17   NCR lynx */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 18   none */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 19   none */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 20   none */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 21   none */
		{   48,    48,    49,    50,    51},	/* IdSel 22   slot 0 */
		{   52,    52,    53,    54,    55},	/* IdSel 23   slot 1 */
		{   56,    56,    57,    58,    59},	/* IdSel 24   slot 2 */
		{   60,    60,    61,    62,    63}	/* IdSel 25   slot 3 */
	};
	const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
/*
 * Resolve the interrupting slot for @dev, swizzling the interrupt
 * pin (*pinp) up through any card-based bridges.  Devices behind the
 * built-in PCI-PCI bridge (IdSel 3) map to slots 11 and up.
 */
static u8 __init
lynx_swizzle(struct pci_dev *dev, u8 *pinp)
{
	int pin = *pinp;
	int slot;

	if (dev->bus->number == 0) {
		/* Directly on the primary bus. */
		slot = PCI_SLOT(dev->devfn);
	} else if (PCI_SLOT(dev->bus->self->devfn) == 3) {
		/* Immediately behind the built-in bridge. */
		slot = PCI_SLOT(dev->devfn) + 11;
	} else {
		/* Behind one or more card-based bridges: walk upward,
		   swizzling the pin at each hop. */
		for (;;) {
			if (PCI_SLOT(dev->bus->self->devfn) == 3) {
				slot = PCI_SLOT(dev->devfn) + 11;
				break;
			}
			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
			dev = dev->bus->self;	/* move up one bridge */
			slot = PCI_SLOT(dev->devfn);
			if (!dev->bus->self)
				break;	/* reached the primary bus */
		}
	}
	*pinp = pin;
	return slot;
}
#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */
/***********************************************************************/
/* GENERIC irq routines */
/*
 * Enable @irq: clear its bit in the shadow mask under the IRQ lock
 * and push the new mask to the hardware via the platform hook.
 */
static inline void
sable_lynx_enable_irq(unsigned int irq)
{
	unsigned long bit, mask;

	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
	spin_lock(&sable_lynx_irq_lock);
	mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
	spin_unlock(&sable_lynx_irq_lock);
#if 0
	/* FIX: bit is unsigned long, so it needs %lx (was %x, a
	   format/argument mismatch on 64-bit alpha). */
	printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n",
	       __FUNCTION__, mask, bit, irq);
#endif
}
/*
 * Disable @irq: set its bit in the shadow mask under the IRQ lock
 * and push the new mask to the hardware via the platform hook.
 */
static void
sable_lynx_disable_irq(unsigned int irq)
{
	unsigned long bit, mask;

	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
	spin_lock(&sable_lynx_irq_lock);
	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
	spin_unlock(&sable_lynx_irq_lock);
#if 0
	/* FIX: bit is unsigned long, so it needs %lx (was %x, a
	   format/argument mismatch on 64-bit alpha). */
	printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n",
	       __FUNCTION__, mask, bit, irq);
#endif
}
/*
 * ->startup hook: just enable the line; return 0 (never reports a
 * pending interrupt).
 */
static unsigned int
sable_lynx_startup_irq(unsigned int irq)
{
	sable_lynx_enable_irq(irq);
	return 0;
}
/*
 * ->end hook: re-enable the line after handling, unless it was
 * disabled meanwhile or is still being serviced.
 */
static void
sable_lynx_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		sable_lynx_enable_irq(irq);
}
/*
 * ->ack hook: mask the line in the shadow mask, push the mask to the
 * hardware, then acknowledge the interrupt -- all under the IRQ lock.
 */
static void
sable_lynx_mask_and_ack_irq(unsigned int irq)
{
	unsigned long hwbit;
	unsigned long newmask;

	hwbit = sable_lynx_irq_swizzle->irq_to_mask[irq];
	spin_lock(&sable_lynx_irq_lock);
	sable_lynx_irq_swizzle->shadow_mask |= 1UL << hwbit;
	newmask = sable_lynx_irq_swizzle->shadow_mask;
	sable_lynx_irq_swizzle->update_irq_hw(hwbit, newmask);
	sable_lynx_irq_swizzle->ack_irq_hw(hwbit);
	spin_unlock(&sable_lynx_irq_lock);
}
/*
 * Interrupt-controller operations shared by SABLE and LYNX; the
 * machine-specific mask/ack register accesses are reached through
 * sable_lynx_irq_swizzle's function pointers.
 */
static struct hw_interrupt_type sable_lynx_irq_type = {
	.typename	= "SABLE/LYNX",
	.startup	= sable_lynx_startup_irq,
	.shutdown	= sable_lynx_disable_irq,
	.enable		= sable_lynx_enable_irq,
	.disable	= sable_lynx_disable_irq,
	.ack		= sable_lynx_mask_and_ack_irq,
	.end		= sable_lynx_end_irq,
};
/*
 * SRM device-interrupt dispatch shared by SABLE and LYNX.  The vector
 * reported by the SRM PALcode corresponds to the interrupt mask bits,
 * but we have to manage via the so-called legacy IRQs for many common
 * devices, so translate through the platform's mask_to_irq table.
 */
static void
sable_lynx_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int mask_bit = (vector - 0x800) >> 4;
	int irq = sable_lynx_irq_swizzle->mask_to_irq[mask_bit];

#if 0
	printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n",
	       __FUNCTION__, vector, mask_bit, irq);
#endif
	handle_irq(irq, regs);
}
/*
 * Common SABLE/LYNX IRQ initialization: mark the first @nr_irqs
 * descriptors disabled and level-triggered, attach the shared
 * handler type, then set up ISA DMA.
 */
static void __init
sable_lynx_init_irq(int nr_irqs)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		irq_desc[irq].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[irq].handler = &sable_lynx_irq_type;
	}

	common_init_isa_dma();
}
/* PCI init for SABLE/LYNX: nothing beyond the common Alpha PCI setup. */
static void __init
sable_lynx_init_pci(void)
{
	common_init_pci();
}
/*****************************************************************/
/*
* The System Vectors
*
......@@ -280,7 +557,8 @@ sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
* these games with GAMMA_BIAS.
*/
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_GAMMA)
#if defined(CONFIG_ALPHA_GENERIC) || \
(defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS 0
struct alpha_machine_vector sable_mv __initmv = {
......@@ -295,13 +573,13 @@ struct alpha_machine_vector sable_mv __initmv = {
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 40,
.device_interrupt = sable_srm_device_interrupt,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = sable_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = NULL,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = sable_map_irq,
.pci_swizzle = common_swizzle,
......@@ -310,9 +588,10 @@ struct alpha_machine_vector sable_mv __initmv = {
} }
};
ALIAS_MV(sable)
#endif
#endif /* GENERIC || (SABLE && !GAMMA) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_GAMMA)
#if defined(CONFIG_ALPHA_GENERIC) || \
(defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
struct alpha_machine_vector sable_gamma_mv __initmv = {
......@@ -327,12 +606,13 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 40,
.device_interrupt = sable_srm_device_interrupt,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = sable_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = sable_map_irq,
.pci_swizzle = common_swizzle,
......@@ -341,4 +621,36 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
} }
};
ALIAS_MV(sable_gamma)
#endif
#endif /* GENERIC || (SABLE && GAMMA) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
/*
 * Machine vector for the AlphaServer 2100A (Lynx): T2-based core
 * logic, EV4 MMU, 64 IRQs, Lynx-specific PCI IRQ mapping/swizzling,
 * shared SABLE/LYNX interrupt dispatch.  Lynx always uses the gamma
 * bias (see the LYNX note next to _GAMMA_BIAS in the header).
 */
struct alpha_machine_vector lynx_mv __initmv = {
	.vector_name		= "Lynx",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_T2_IO,
	DO_T2_BUS,
	.machine_check		= t2_machine_check,
	.max_isa_dma_address	= ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= T2_DEFAULT_MEM_BASE,
	.nr_irqs		= 64,
	.device_interrupt	= sable_lynx_srm_device_interrupt,

	.init_arch		= t2_init_arch,
	.init_irq		= lynx_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= sable_lynx_init_pci,
	.kill_arch		= t2_kill_arch,
	.pci_map_irq		= lynx_map_irq,
	.pci_swizzle		= lynx_swizzle,

	.sys = { .t2 = {
		.gamma_bias	= _GAMMA_BIAS
	} }
};
ALIAS_MV(lynx)
#endif /* GENERIC || LYNX */
......@@ -3,8 +3,9 @@
#include <linux/config.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/compiler.h>
#include <asm/system.h>
/*
* T2 is the internal name for the core logic chipset which provides
......@@ -22,6 +23,7 @@
#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
#define _GAMMA_BIAS 0x8000000000UL
#if defined(CONFIG_ALPHA_GENERIC)
......@@ -57,10 +59,33 @@
#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL)
#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL)
#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL)
#define T2_IVR (IDENT_ADDR + GAMMA_BIAS + 0x38e000220UL)
#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)
/* The CSRs below are T3/T4 only */
#define T2_WBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000280UL)
#define T2_WMASK3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002a0UL)
#define T2_TBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002c0UL)
#define T2_TDR0 (IDENT_ADDR + GAMMA_BIAS + 0x38e000300UL)
#define T2_TDR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000320UL)
#define T2_TDR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000340UL)
#define T2_TDR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000360UL)
#define T2_TDR4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000380UL)
#define T2_TDR5 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003a0UL)
#define T2_TDR6 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003c0UL)
#define T2_TDR7 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003e0UL)
#define T2_WBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000400UL)
#define T2_WMASK4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000420UL)
#define T2_TBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000440UL)
#define T2_AIR (IDENT_ADDR + GAMMA_BIAS + 0x38e000460UL)
#define T2_VAR (IDENT_ADDR + GAMMA_BIAS + 0x38e000480UL)
#define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
#define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
#define T2_HAE_ADDRESS T2_HAE_1
/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
......@@ -100,6 +125,9 @@
#define T2_CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
#define T2_CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
#define T2_CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
#define T2_CPUn_BASE(n) (T2_CPU0_BASE + (((n)&3) * 0x001000000L))
#define T2_MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
#define T2_MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
#define T2_MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
......@@ -408,87 +436,120 @@ __EXTERN_INLINE void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
/* Serializes HAE window updates against the sparse-space accessors below. */
static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED;
/*
 * Read a byte from T2 sparse space.  The HAE window is selected under
 * t2_hae_lock.  NOTE: `msb' looks unused but is referenced by the
 * t2_set_hae macro (its expansion ends in set_hae(msb)) -- do not
 * remove it.
 */
__EXTERN_INLINE u8 t2_readb(unsigned long addr)
{
	unsigned long result, msb;
	unsigned long flags;
	spin_lock_irqsave(&t2_hae_lock, flags);

	t2_set_hae;

	result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
	spin_unlock_irqrestore(&t2_hae_lock, flags);
	return __kernel_extbl(result, addr & 3);	/* extract the byte lane */
}
/*
 * Read a 16-bit word from T2 sparse space under the HAE lock.
 * `msb' is consumed by the t2_set_hae macro -- do not remove it.
 */
__EXTERN_INLINE u16 t2_readw(unsigned long addr)
{
	unsigned long result, msb;
	unsigned long flags;
	spin_lock_irqsave(&t2_hae_lock, flags);

	t2_set_hae;

	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
	spin_unlock_irqrestore(&t2_hae_lock, flags);
	return __kernel_extwl(result, addr & 3);	/* extract the word lane */
}
/*
 * On SABLE with T2, we must use SPARSE memory even for 32-bit access,
 * because we cannot access all of DENSE without changing its HAE.
 */
__EXTERN_INLINE u32 t2_readl(unsigned long addr)
{
unsigned long msb;
unsigned long result, msb;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
return *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
spin_unlock_irqrestore(&t2_hae_lock, flags);
return result & 0xffffffffUL;
}
/*
 * Read a quadword as two 32-bit sparse-space accesses performed under
 * a single HAE window.  `msb' is consumed by the t2_set_hae macro.
 */
__EXTERN_INLINE u64 t2_readq(unsigned long addr)
{
	unsigned long r0, r1, work, msb;
	unsigned long flags;
	spin_lock_irqsave(&t2_hae_lock, flags);

	t2_set_hae;

	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
	r0 = *(vuip)(work);			/* low 32 bits */
	r1 = *(vuip)(work + (4 << 5));		/* high 32 bits */
	spin_unlock_irqrestore(&t2_hae_lock, flags);
	return r1 << 32 | r0;
}
/*
 * Write a byte to T2 sparse space under the HAE lock.  The byte is
 * positioned into its lane with __kernel_insbl before the 32-bit
 * sparse store.  `msb' is consumed by the t2_set_hae macro.
 */
__EXTERN_INLINE void t2_writeb(u8 b, unsigned long addr)
{
	unsigned long msb, w;
	unsigned long flags;
	spin_lock_irqsave(&t2_hae_lock, flags);

	t2_set_hae;

	w = __kernel_insbl(b, addr & 3);
	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
	spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
 * Write a 16-bit word to T2 sparse space under the HAE lock.  The
 * word is positioned into its lane with __kernel_inswl before the
 * 32-bit sparse store.  `msb' is consumed by the t2_set_hae macro.
 */
__EXTERN_INLINE void t2_writew(u16 b, unsigned long addr)
{
	unsigned long msb, w;
	unsigned long flags;
	spin_lock_irqsave(&t2_hae_lock, flags);

	t2_set_hae;

	w = __kernel_inswl(b, addr & 3);
	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
	spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
 * On SABLE with T2, we must use SPARSE memory even for 32-bit access,
 * because we cannot access all of DENSE without changing its HAE.
 */
/*
 * Write a longword via SPARSE space (see the comment above: DENSE is
 * not fully reachable without changing its HAE).  `msb' is consumed
 * by the t2_set_hae macro.
 */
__EXTERN_INLINE void t2_writel(u32 b, unsigned long addr)
{
	unsigned long msb;
	unsigned long flags;
	spin_lock_irqsave(&t2_hae_lock, flags);

	t2_set_hae;

	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
	spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
 * Write a quadword as two 32-bit sparse-space stores performed under
 * a single HAE window.  `msb' is consumed by the t2_set_hae macro.
 */
__EXTERN_INLINE void t2_writeq(u64 b, unsigned long addr)
{
	unsigned long msb, work;
	unsigned long flags;
	spin_lock_irqsave(&t2_hae_lock, flags);

	t2_set_hae;

	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
	*(vuip)work = b;			/* low 32 bits */
	*(vuip)(work + (4 << 5)) = b >> 32;	/* high 32 bits */
	spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE unsigned long t2_ioremap(unsigned long addr,
......
......@@ -42,6 +42,7 @@
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
defined(CONFIG_ALPHA_SHARK) || \
defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 64
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment