Commit 43c4e96e authored by Linus Torvalds's avatar Linus Torvalds

Import pre2.0.12

parent a0f3dc93
......@@ -34,7 +34,7 @@ Current Releases
- Termcap 2.0.8
- Procps 0.99a
- Gpm 1.09
- SysVinit 2.60
- sysvinit 2.62
- Util-linux 2.5
Upgrade notes
......@@ -164,7 +164,9 @@ or earlier, you will probably get a weird error on shutdown in which
your computer shuts down fine but "INIT: error reading initrequest" or
words to that effect scroll across your screen hundreds of times. To
fix, upgrade to
ftp://ftp.cistron.nl/pub/people/miquels/debian/sysvinit-2.60.tar.gz.
ftp://sunsite.unc.edu/pub/Linux/system/Daemons/init/sysvinit-2.62.tar.gz
ftp://tsx-11.mit.edu/pub/linux/sources/sbin/sysvinit-2.62.tar.gz
ftp://ftp.cistron.nl/pub/people/miquels/software/sysvinit-2.62.tar.gz.
If you're trying to run NCSA httpd, you have to set pre-spawning of
daemons to zero, as it incorrectly assumes SunOS behavior. I recommend
......
......@@ -2,9 +2,10 @@ This is the README file for the Optics Storage 8000 AT CDROM device driver.
This is the driver for the so-called 'DOLPHIN' drive, with the 34-pin
Sony-compatible interface. For the IDE-compatible Optics Storage 8001
drive, you will want the ATAPI CDROM driver. If you have a drive that
works with this driver, and that doesn't report itself as DOLPHIN,
please drop me a mail.
drive, you will want the ATAPI CDROM driver. The driver also seems to
work with the Lasermate CR328A. If you have a drive that works with
this driver, and that doesn't report itself as DOLPHIN, please drop me
a mail.
The support for multisession CDs is in ALPHA stage. If you use it,
please mail me your experiences. Multisession support can be disabled
......
VERSION = 1
PATCHLEVEL = 99
SUBLEVEL = 11
SUBLEVEL = 12
ARCH = i386
......
......@@ -35,6 +35,7 @@ fi
choice 'Alpha system type' \
"Avanti CONFIG_ALPHA_AVANTI \
Alpha-XL CONFIG_ALPHA_XL \
Cabriolet CONFIG_ALPHA_CABRIOLET \
EB66 CONFIG_ALPHA_EB66 \
EB66+ CONFIG_ALPHA_EB66P \
......@@ -43,6 +44,8 @@ choice 'Alpha system type' \
PC164 CONFIG_ALPHA_PC164 \
Jensen CONFIG_ALPHA_JENSEN \
Noname CONFIG_ALPHA_NONAME \
Mikasa CONFIG_ALPHA_MIKASA \
Alcor CONFIG_ALPHA_ALCOR \
Platform2000 CONFIG_ALPHA_P2K" Cabriolet
if [ "$CONFIG_ALPHA_NONAME" = "y" -o "$CONFIG_ALPHA_EB66" = "y" \
-o "$CONFIG_ALPHA_EB66P" = "y" -o "$CONFIG_ALPHA_P2K" = "y" ]
......@@ -50,33 +53,33 @@ then
define_bool CONFIG_PCI y
define_bool CONFIG_ALPHA_LCA y
fi
if [ "$CONFIG_ALPHA_AVANTI" = "y" ]
then
bool 'Is it really a true XL' CONFIG_ALPHA_XL
fi
if [ "$CONFIG_ALPHA_CABRIOLET" = "y" -o "$CONFIG_ALPHA_AVANTI" = "y" \
-o "$CONFIG_ALPHA_EB64P" = "y" ]
-o "$CONFIG_ALPHA_EB64P" = "y" -o "$CONFIG_ALPHA_MIKASA" = "y" \
-o "$CONFIG_ALPHA_XL" = "y" ]
then
define_bool CONFIG_PCI y
define_bool CONFIG_ALPHA_APECS y
fi
if [ "$CONFIG_ALPHA_EB164" = "y" -o "$CONFIG_ALPHA_PC164" = "y" ]
if [ "$CONFIG_ALPHA_EB164" = "y" -o "$CONFIG_ALPHA_PC164" = "y" \
-o "$CONFIG_ALPHA_ALCOR" = "y" ]
then
define_bool CONFIG_PCI y
define_bool CONFIG_ALPHA_EV5 y
define_bool CONFIG_ALPHA_ALCOR y
define_bool CONFIG_ALPHA_CIA y
else
# EV5 and newer supports all rounding modes in hw:
define_bool CONFIG_ALPHA_NEED_ROUNDING_EMULATION y
fi
if [ "$CONFIG_ALPHA_CABRIOLET" = "y" -o "$CONFIG_ALPHA_AVANTI" = "y" \
-o "$CONFIG_ALPHA_EB64P" = "y" -o "$CONFIG_ALPHA_JENSEN" = "y" ]
-o "$CONFIG_ALPHA_EB64P" = "y" -o "$CONFIG_ALPHA_JENSEN" = "y" \
-o "$CONFIG_ALPHA_MIKASA" = "y" -o "$CONFIG_ALPHA_ALCOR" = "y" ]
then
bool 'Using SRM as bootloader' CONFIG_ALPHA_SRM
fi
if [ "$CONFIG_ALPHA_XL" = "y" ]
then
if [ "$CONFIG_ALPHA_XL" = "n" ]
then
bool 'Using SRM as bootloader' CONFIG_ALPHA_SRM
fi
define_bool CONFIG_ALPHA_AVANTI y
fi
bool 'Echo console messages on /dev/ttyS1' CONFIG_SERIAL_ECHO
......
......@@ -16,7 +16,7 @@ all: kernel.o head.o
O_TARGET := kernel.o
O_OBJS := entry.o traps.o process.o osf_sys.o irq.o signal.o setup.o \
bios32.o ptrace.o time.o apecs.o lca.o alcor.o ksyms.o
bios32.o ptrace.o time.o apecs.o lca.o cia.o ksyms.o
all: kernel.o head.o
......
......@@ -519,7 +519,13 @@ void apecs_machine_check(unsigned long vector, unsigned long la_ptr,
* Check if machine check is due to a badaddr() and if so,
* ignore the machine check.
*/
#ifdef CONFIG_ALPHA_MIKASA
/* for now on MIKASA, if it was expected, ignore it */
/* we need the details of the mcheck frame to really know... */
if (apecs_mcheck_expected) {
#else
if (apecs_mcheck_expected && (mchk_sysdata->epic_dcsr && 0x0c00UL)) {
#endif
apecs_mcheck_expected = 0;
apecs_mcheck_taken = 1;
mb();
......
......@@ -87,7 +87,7 @@ extern struct hwrpb_struct *hwrpb;
#if PCI_MODIFY
#if 0
#if defined(CONFIG_ALPHA_MIKASA) || defined(CONFIG_ALPHA_ALCOR)
static unsigned int io_base = 64*KB; /* <64KB are (E)ISA ports */
#else
static unsigned int io_base = 0xb000;
......@@ -116,6 +116,17 @@ static void disable_dev(struct pci_dev *dev)
struct pci_bus *bus;
unsigned short cmd;
#if defined(CONFIG_ALPHA_MIKASA) || defined(CONFIG_ALPHA_ALCOR)
/*
* HACK: the PCI-to-EISA bridge does not seem to identify
* itself as a bridge... :-(
*/
if (dev->vendor == 0x8086 && dev->device == 0x0482) {
DBG_DEVS(("disable_dev: ignoring...\n"));
return;
}
#endif
bus = dev->bus;
pcibios_read_config_word(bus->number, dev->devfn, PCI_COMMAND, &cmd);
......@@ -525,7 +536,13 @@ static inline void common_fixup(long min_idsel, long max_idsel, long irqs_per_sl
* Go through all devices, fixing up irqs as we see fit:
*/
for (dev = pci_devices; dev; dev = dev->next) {
if (dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) {
if (dev->class >> 16 != PCI_BASE_CLASS_BRIDGE
#if defined(CONFIG_ALPHA_MIKASA) || defined(CONFIG_ALPHA_ALCOR)
/* PCEB (PCI to EISA bridge) does not identify
itself as a bridge... :-( */
&& !((dev->vendor==0x8086) && (dev->device==0x482))
#endif
) {
dev->irq = 0;
/*
* This device is not on the primary bus, we need to figure out which
......@@ -554,8 +571,10 @@ static inline void common_fixup(long min_idsel, long max_idsel, long irqs_per_sl
/* work out the slot */
slot = PCI_SLOT(dev->devfn) ;
/* read the pin */
pcibios_read_config_byte(dev->bus->number, dev->devfn,
PCI_INTERRUPT_PIN, &pin);
pcibios_read_config_byte(dev->bus->number,
dev->devfn,
PCI_INTERRUPT_PIN,
&pin);
}
if (irq_tab[slot - min_idsel][pin] != -1)
dev->irq = irq_tab[slot - min_idsel][pin];
......@@ -568,7 +587,8 @@ static inline void common_fixup(long min_idsel, long max_idsel, long irqs_per_sl
* if it's a VGA, enable its BIOS ROM at C0000
*/
if ((dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
pcibios_write_config_dword(dev->bus->number, dev->devfn,
pcibios_write_config_dword(dev->bus->number,
dev->devfn,
PCI_ROM_ADDRESS,
0x000c0000 | PCI_ROM_ADDRESS_ENABLE);
}
......@@ -749,6 +769,116 @@ static inline void eb66_and_eb64p_fixup(void)
}
/*
* Fixup configuration for MIKASA (NORITAKE is different)
*
* Summary @ 0x536:
* Bit Meaning
* 0 Interrupt Line A from slot 0
* 1 Interrupt Line B from slot 0
* 2 Interrupt Line C from slot 0
* 3 Interrupt Line D from slot 0
* 4 Interrupt Line A from slot 1
* 5 Interrupt Line B from slot 1
* 6 Interrupt Line C from slot 1
* 7 Interrupt Line D from slot 1
* 8 Interrupt Line A from slot 2
* 9 Interrupt Line B from slot 2
*10 Interrupt Line C from slot 2
*11 Interrupt Line D from slot 2
*12 NCR 810 SCSI
*13 Power Supply Fail
*14 Temperature Warn
*15 Reserved
*
* The device to slot mapping looks like:
*
* Slot Device
* 6 NCR SCSI controller
* 7 Intel PCI-EISA bridge chip
* 11 PCI on board slot 0
* 12 PCI on board slot 1
* 13 PCI on board slot 2
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
static inline void mikasa_fixup(void)
{
	/*
	 * PCI interrupt routing for MIKASA: rows are indexed by
	 * IdSel (17..24), columns by interrupt pin (INT, INTA..INTD).
	 * An entry of -1 means "no interrupt routed".
	 */
	char route_tab[8][5] = {
		/*INT    INTA   INTB   INTC   INTD */
		{16+12, 16+12, 16+12, 16+12, 16+12},	/* IdSel 17, SCSI */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 18, PCEB */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 19, ???? */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 20, ???? */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 21, ???? */
		{ 16+0,  16+0,  16+1,  16+2,  16+3},	/* IdSel 22, slot 0 */
		{ 16+4,  16+4,  16+5,  16+6,  16+7},	/* IdSel 23, slot 1 */
		{ 16+8,  16+8,  16+9, 16+10, 16+11},	/* IdSel 24, slot 2 */
	};

	/* Walk devices at IdSel 6..13, 5 pin entries per slot. */
	common_fixup(6, 13, 5, route_tab, 0);
}
/*
* Fixup configuration for ALCOR
*
* Summary @ GRU_INT_REQ:
* Bit Meaning
* 0 Interrupt Line A from slot 2
* 1 Interrupt Line B from slot 2
* 2 Interrupt Line C from slot 2
* 3 Interrupt Line D from slot 2
* 4 Interrupt Line A from slot 1
* 5 Interrupt Line B from slot 1
* 6 Interrupt Line C from slot 1
* 7 Interrupt Line D from slot 1
* 8 Interrupt Line A from slot 0
* 9 Interrupt Line B from slot 0
*10 Interrupt Line C from slot 0
*11 Interrupt Line D from slot 0
*12 Interrupt Line A from slot 4
*13 Interrupt Line B from slot 4
*14 Interrupt Line C from slot 4
*15 Interrupt Line D from slot 4
*16 Interrupt Line A from slot 3
*17 Interrupt Line B from slot 3
*18 Interrupt Line C from slot 3
*19 Interrupt Line D from slot 3
*20-30 Reserved
*31 EISA interrupt
*
* The device to slot mapping looks like:
*
* Slot Device
* 7 PCI on board slot 0
* 8 PCI on board slot 3
* 9 PCI on board slot 4
* 10 PCEB (PCI-EISA bridge)
* 11 PCI on board slot 2
* 12 PCI on board slot 1
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
static inline void alcor_fixup(void)
{
	/*
	 * PCI interrupt routing for ALCOR: rows are indexed by
	 * IdSel (18..23), columns by interrupt pin (INT, INTA..INTD).
	 * An entry of -1 means "no interrupt routed".
	 */
	char route_tab[6][5] = {
		/*INT    INTA   INTB   INTC   INTD */
		{ 16+8,  16+8,  16+9, 16+10, 16+11},	/* IdSel 18, slot 0 */
		{16+16, 16+16, 16+17, 16+18, 16+19},	/* IdSel 19, slot 3 */
		{16+12, 16+12, 16+13, 16+14, 16+15},	/* IdSel 20, slot 4 */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 21, PCEB */
		{ 16+0,  16+0,  16+1,  16+2,  16+3},	/* IdSel 22, slot 2 */
		{ 16+4,  16+4,  16+5,  16+6,  16+7},	/* IdSel 23, slot 1 */
	};

	/* Walk devices at IdSel 7..12, 5 pin entries per slot. */
	common_fixup(7, 12, 5, route_tab, 0);
}
/*
* Fixup configuration for all boards that route the PCI interrupts
* through the SIO PCI/ISA bridge. This includes Noname (AXPpci33),
......@@ -929,6 +1059,10 @@ unsigned long pcibios_fixup(unsigned long mem_start, unsigned long mem_end)
eb66_and_eb64p_fixup();
#elif defined(CONFIG_ALPHA_EB64P)
eb66_and_eb64p_fixup();
#elif defined(CONFIG_ALPHA_MIKASA)
mikasa_fixup();
#elif defined(CONFIG_ALPHA_ALCOR)
alcor_fixup();
#else
# error You must tell me what kind of platform you want.
#endif
......
/*
* Code common to all ALCOR chips.
* Code common to all CIA chips.
*
* Written by David A Rusling (david.rusling@reo.mts.dec.com).
* December 1995.
......@@ -25,7 +25,7 @@ extern int alpha_sys_type;
* BIOS32-style PCI interface:
*/
#ifdef CONFIG_ALPHA_ALCOR
#ifdef CONFIG_ALPHA_CIA
#ifdef DEBUG
# define DBG(args) printk args
......@@ -36,14 +36,14 @@ extern int alpha_sys_type;
#define vulp volatile unsigned long *
#define vuip volatile unsigned int *
static volatile unsigned int ALCOR_mcheck_expected = 0;
static volatile unsigned int ALCOR_mcheck_taken = 0;
static unsigned long ALCOR_jd, ALCOR_jd1, ALCOR_jd2;
static volatile unsigned int CIA_mcheck_expected = 0;
static volatile unsigned int CIA_mcheck_taken = 0;
static unsigned long CIA_jd, CIA_jd1, CIA_jd2;
/*
* Given a bus, device, and function number, compute resulting
* configuration space address and setup the ALCOR_HAXR2 register
* configuration space address and setup the CIA_HAXR2 register
* accordingly. It is therefore not safe to have concurrent
* invocations to configuration space access routines, but there
* really shouldn't be any need for this.
......@@ -126,32 +126,32 @@ static unsigned int conf_read(unsigned long addr, unsigned char type1)
DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
/* reset status register to avoid losing errors: */
stat0 = *((volatile unsigned int *)ALCOR_IOC_CIA_ERR);
*((volatile unsigned int *)ALCOR_IOC_CIA_ERR) = stat0;
stat0 = *((volatile unsigned int *)CIA_IOC_CIA_ERR);
*((volatile unsigned int *)CIA_IOC_CIA_ERR) = stat0;
mb();
DBG(("conf_read: ALCOR CIA ERR was 0x%x\n", stat0));
DBG(("conf_read: CIA ERR was 0x%x\n", stat0));
/* if Type1 access, must set CIA CFG */
if (type1) {
cia_cfg = *((unsigned int *)ALCOR_IOC_CFG);
cia_cfg = *((unsigned int *)CIA_IOC_CFG);
mb();
*((unsigned int *)ALCOR_IOC_CFG) = cia_cfg | 1;
*((unsigned int *)CIA_IOC_CFG) = cia_cfg | 1;
DBG(("conf_read: TYPE1 access\n"));
}
draina();
ALCOR_mcheck_expected = 1;
ALCOR_mcheck_taken = 0;
CIA_mcheck_expected = 1;
CIA_mcheck_taken = 0;
mb();
/* access configuration space: */
value = *((volatile unsigned int *)addr);
mb();
mb();
if (ALCOR_mcheck_taken) {
ALCOR_mcheck_taken = 0;
if (CIA_mcheck_taken) {
CIA_mcheck_taken = 0;
value = 0xffffffffU;
mb();
}
ALCOR_mcheck_expected = 0;
CIA_mcheck_expected = 0;
mb();
/*
* david.rusling@reo.mts.dec.com. This code is needed for the
......@@ -163,16 +163,16 @@ static unsigned int conf_read(unsigned long addr, unsigned char type1)
draina();
/* now look for any errors */
stat0 = *((unsigned int *)ALCOR_IOC_CIA_ERR);
DBG(("conf_read: ALCOR CIA ERR after read 0x%x\n", stat0));
stat0 = *((unsigned int *)CIA_IOC_CIA_ERR);
DBG(("conf_read: CIA ERR after read 0x%x\n", stat0));
if (stat0 & 0x8280U) { /* is any error bit set? */
/* if not NDEV, print status */
if (!(stat0 & 0x0080)) {
printk("ALCOR.c:conf_read: got stat0=%x\n", stat0);
printk("CIA.c:conf_read: got stat0=%x\n", stat0);
}
/* reset error status: */
*((volatile unsigned long *)ALCOR_IOC_CIA_ERR) = stat0;
*((volatile unsigned long *)CIA_IOC_CIA_ERR) = stat0;
mb();
wrmces(0x7); /* reset machine check */
value = 0xffffffff;
......@@ -181,7 +181,7 @@ static unsigned int conf_read(unsigned long addr, unsigned char type1)
/* if Type1 access, must reset IOC CFG so normal IO space ops work */
if (type1) {
*((unsigned int *)ALCOR_IOC_CFG) = cia_cfg & ~1;
*((unsigned int *)CIA_IOC_CFG) = cia_cfg & ~1;
mb();
}
......@@ -202,26 +202,26 @@ static void conf_write(unsigned long addr, unsigned int value, unsigned char typ
cli();
/* reset status register to avoid losing errors: */
stat0 = *((volatile unsigned int *)ALCOR_IOC_CIA_ERR);
*((volatile unsigned int *)ALCOR_IOC_CIA_ERR) = stat0;
stat0 = *((volatile unsigned int *)CIA_IOC_CIA_ERR);
*((volatile unsigned int *)CIA_IOC_CIA_ERR) = stat0;
mb();
DBG(("conf_write: ALCOR CIA ERR was 0x%x\n", stat0));
DBG(("conf_write: CIA ERR was 0x%x\n", stat0));
/* if Type1 access, must set CIA CFG */
if (type1) {
cia_cfg = *((unsigned int *)ALCOR_IOC_CFG);
cia_cfg = *((unsigned int *)CIA_IOC_CFG);
mb();
*((unsigned int *)ALCOR_IOC_CFG) = cia_cfg | 1;
*((unsigned int *)CIA_IOC_CFG) = cia_cfg | 1;
DBG(("conf_read: TYPE1 access\n"));
}
draina();
ALCOR_mcheck_expected = 1;
CIA_mcheck_expected = 1;
mb();
/* access configuration space: */
*((volatile unsigned int *)addr) = value;
mb();
mb();
ALCOR_mcheck_expected = 0;
CIA_mcheck_expected = 0;
mb();
/*
* david.rusling@reo.mts.dec.com. This code is needed for the
......@@ -233,16 +233,16 @@ static void conf_write(unsigned long addr, unsigned int value, unsigned char typ
draina();
/* now look for any errors */
stat0 = *((unsigned int *)ALCOR_IOC_CIA_ERR);
DBG(("conf_write: ALCOR CIA ERR after write 0x%x\n", stat0));
stat0 = *((unsigned int *)CIA_IOC_CIA_ERR);
DBG(("conf_write: CIA ERR after write 0x%x\n", stat0));
if (stat0 & 0x8280U) { /* is any error bit set? */
/* if not NDEV, print status */
if (!(stat0 & 0x0080)) {
printk("ALCOR.c:conf_read: got stat0=%x\n", stat0);
printk("CIA.c:conf_read: got stat0=%x\n", stat0);
}
/* reset error status: */
*((volatile unsigned long *)ALCOR_IOC_CIA_ERR) = stat0;
*((volatile unsigned long *)CIA_IOC_CIA_ERR) = stat0;
mb();
wrmces(0x7); /* reset machine check */
value = 0xffffffff;
......@@ -251,7 +251,7 @@ static void conf_write(unsigned long addr, unsigned int value, unsigned char typ
/* if Type1 access, must reset IOC CFG so normal IO space ops work */
if (type1) {
*((unsigned int *)ALCOR_IOC_CFG) = cia_cfg & ~1;
*((unsigned int *)CIA_IOC_CFG) = cia_cfg & ~1;
mb();
}
......@@ -263,7 +263,7 @@ static void conf_write(unsigned long addr, unsigned int value, unsigned char typ
int pcibios_read_config_byte (unsigned char bus, unsigned char device_fn,
unsigned char where, unsigned char *value)
{
unsigned long addr = ALCOR_CONF;
unsigned long addr = CIA_CONF;
unsigned long pci_addr;
unsigned char type1;
......@@ -284,7 +284,7 @@ int pcibios_read_config_byte (unsigned char bus, unsigned char device_fn,
int pcibios_read_config_word (unsigned char bus, unsigned char device_fn,
unsigned char where, unsigned short *value)
{
unsigned long addr = ALCOR_CONF;
unsigned long addr = CIA_CONF;
unsigned long pci_addr;
unsigned char type1;
......@@ -308,7 +308,7 @@ int pcibios_read_config_word (unsigned char bus, unsigned char device_fn,
int pcibios_read_config_dword (unsigned char bus, unsigned char device_fn,
unsigned char where, unsigned int *value)
{
unsigned long addr = ALCOR_CONF;
unsigned long addr = CIA_CONF;
unsigned long pci_addr;
unsigned char type1;
......@@ -329,7 +329,7 @@ int pcibios_read_config_dword (unsigned char bus, unsigned char device_fn,
int pcibios_write_config_byte (unsigned char bus, unsigned char device_fn,
unsigned char where, unsigned char value)
{
unsigned long addr = ALCOR_CONF;
unsigned long addr = CIA_CONF;
unsigned long pci_addr;
unsigned char type1;
......@@ -345,7 +345,7 @@ int pcibios_write_config_byte (unsigned char bus, unsigned char device_fn,
int pcibios_write_config_word (unsigned char bus, unsigned char device_fn,
unsigned char where, unsigned short value)
{
unsigned long addr = ALCOR_CONF;
unsigned long addr = CIA_CONF;
unsigned long pci_addr;
unsigned char type1;
......@@ -361,7 +361,7 @@ int pcibios_write_config_word (unsigned char bus, unsigned char device_fn,
int pcibios_write_config_dword (unsigned char bus, unsigned char device_fn,
unsigned char where, unsigned int value)
{
unsigned long addr = ALCOR_CONF;
unsigned long addr = CIA_CONF;
unsigned long pci_addr;
unsigned char type1;
......@@ -374,16 +374,16 @@ int pcibios_write_config_dword (unsigned char bus, unsigned char device_fn,
}
unsigned long alcor_init(unsigned long mem_start, unsigned long mem_end)
unsigned long cia_init(unsigned long mem_start, unsigned long mem_end)
{
unsigned int cia_err ;
/*
* Set up error reporting.
*/
cia_err = *(vuip)ALCOR_IOC_CIA_ERR ;
cia_err = *(vuip)CIA_IOC_CIA_ERR ;
cia_err |= (0x1 << 7) ; /* master abort */
*(vuip)ALCOR_IOC_CIA_ERR = cia_err ;
*(vuip)CIA_IOC_CIA_ERR = cia_err ;
mb() ;
/*
......@@ -393,19 +393,19 @@ unsigned long alcor_init(unsigned long mem_start, unsigned long mem_end)
* goes at 1 GB and is 1 GB large.
*/
*(vuip)ALCOR_IOC_PCI_W0_BASE = 1U | (ALCOR_DMA_WIN_BASE & 0xfff00000U);
*(vuip)ALCOR_IOC_PCI_W0_MASK = (ALCOR_DMA_WIN_SIZE - 1) & 0xfff00000U;
*(vuip)ALCOR_IOC_PCI_T0_BASE = 0;
*(vuip)CIA_IOC_PCI_W0_BASE = 1U | (CIA_DMA_WIN_BASE & 0xfff00000U);
*(vuip)CIA_IOC_PCI_W0_MASK = (CIA_DMA_WIN_SIZE - 1) & 0xfff00000U;
*(vuip)CIA_IOC_PCI_T0_BASE = 0;
*(vuip)ALCOR_IOC_PCI_W1_BASE = 0x0 ;
*(vuip)ALCOR_IOC_PCI_W2_BASE = 0x0 ;
*(vuip)ALCOR_IOC_PCI_W3_BASE = 0x0 ;
*(vuip)CIA_IOC_PCI_W1_BASE = 0x0 ;
*(vuip)CIA_IOC_PCI_W2_BASE = 0x0 ;
*(vuip)CIA_IOC_PCI_W3_BASE = 0x0 ;
/*
* check ASN in HWRPB for validity, report if bad
*/
if (hwrpb->max_asn != MAX_ASN) {
printk("alcor_init: max ASN from HWRPB is bad (0x%lx)\n",
printk("CIA_init: max ASN from HWRPB is bad (0x%lx)\n",
hwrpb->max_asn);
hwrpb->max_asn = MAX_ASN;
}
......@@ -418,43 +418,43 @@ unsigned long alcor_init(unsigned long mem_start, unsigned long mem_end)
*/
{
#if 0
unsigned int cia_cfg = *((unsigned int *)ALCOR_IOC_CFG); mb();
if (cia_cfg) printk("alcor_init: CFG was 0x%x\n", cia_cfg);
unsigned int cia_cfg = *((unsigned int *)CIA_IOC_CFG); mb();
if (cia_cfg) printk("CIA_init: CFG was 0x%x\n", cia_cfg);
#endif
*((unsigned int *)ALCOR_IOC_CFG) = 0; mb();
*((unsigned int *)CIA_IOC_CFG) = 0; mb();
}
return mem_start;
}
int ALCOR_pci_clr_err(void)
int cia_pci_clr_err(void)
{
ALCOR_jd = *((unsigned int *)ALCOR_IOC_CIA_ERR);
DBG(("ALCOR_pci_clr_err: ALCOR CIA ERR after read 0x%x\n", ALCOR_jd));
*((unsigned long *)ALCOR_IOC_CIA_ERR) = 0x0080;
CIA_jd = *((unsigned int *)CIA_IOC_CIA_ERR);
DBG(("CIA_pci_clr_err: CIA ERR after read 0x%x\n", CIA_jd));
*((unsigned long *)CIA_IOC_CIA_ERR) = 0x0080;
mb();
return 0;
}
void alcor_machine_check(unsigned long vector, unsigned long la_ptr,
void cia_machine_check(unsigned long vector, unsigned long la_ptr,
struct pt_regs * regs)
{
#if 1
printk("ALCOR machine check\n") ;
printk("CIA machine check\n") ;
#else
struct el_common *mchk_header;
struct el_ALCOR_sysdata_mcheck *mchk_sysdata;
struct el_CIA_sysdata_mcheck *mchk_sysdata;
mchk_header = (struct el_common *)la_ptr;
mchk_sysdata =
(struct el_ALCOR_sysdata_mcheck *)(la_ptr + mchk_header->sys_offset);
(struct el_CIA_sysdata_mcheck *)(la_ptr + mchk_header->sys_offset);
DBG(("ALCOR_machine_check: vector=0x%lx la_ptr=0x%lx\n", vector, la_ptr));
DBG(("cia_machine_check: vector=0x%lx la_ptr=0x%lx\n", vector, la_ptr));
DBG((" pc=0x%lx size=0x%x procoffset=0x%x sysoffset 0x%x\n",
regs->pc, mchk_header->size, mchk_header->proc_offset, mchk_header->sys_offset));
DBG(("ALCOR_machine_check: expected %d DCSR 0x%lx PEAR 0x%lx\n",
ALCOR_mcheck_expected, mchk_sysdata->epic_dcsr, mchk_sysdata->epic_pear));
DBG(("cia_machine_check: expected %d DCSR 0x%lx PEAR 0x%lx\n",
CIA_mcheck_expected, mchk_sysdata->epic_dcsr, mchk_sysdata->epic_pear));
#ifdef DEBUG
{
unsigned long *ptr;
......@@ -470,12 +470,12 @@ void alcor_machine_check(unsigned long vector, unsigned long la_ptr,
* Check if machine check is due to a badaddr() and if so,
* ignore the machine check.
*/
if (ALCOR_mcheck_expected && (mchk_sysdata->epic_dcsr && 0x0c00UL)) {
ALCOR_mcheck_expected = 0;
ALCOR_mcheck_taken = 1;
if (CIA_mcheck_expected && (mchk_sysdata->epic_dcsr && 0x0c00UL)) {
CIA_mcheck_expected = 0;
CIA_mcheck_taken = 1;
mb();
mb();
ALCOR_pci_clr_err();
cia_pci_clr_err();
wrmces(0x7);
mb();
draina();
......@@ -483,4 +483,4 @@ void alcor_machine_check(unsigned long vector, unsigned long la_ptr,
#endif
}
#endif /* CONFIG_ALPHA_ALCOR */
#endif /* CONFIG_ALPHA_CIA */
......@@ -31,12 +31,18 @@ extern void timer_interrupt(struct pt_regs * regs);
static unsigned char cache_21 = 0xff;
static unsigned char cache_A1 = 0xff;
#if NR_IRQS == 33
#if NR_IRQS == 48
static unsigned int cache_irq_mask = 0x7fffffff; /* enable EISA */
#elif NR_IRQS == 33
static unsigned int cache_804 = 0x00ffffef;
#elif NR_IRQS == 32
#ifdef CONFIG_ALPHA_MIKASA
static unsigned short cache_536 = 0xffff;
#else
static unsigned char cache_26 = 0xdf;
static unsigned char cache_27 = 0xff;
#endif
#endif
static void mask_irq(int irq)
{
......@@ -51,12 +57,23 @@ static void mask_irq(int irq)
cache_A1 |= mask;
outb(cache_A1, 0xA1);
}
#if NR_IRQS == 33
#if NR_IRQS == 48
} else {
mask = 1 << (irq - 16);
cache_irq_mask |= mask;
*(unsigned int *)GRU_INT_MASK = ~cache_irq_mask; /* invert */
#elif NR_IRQS == 33
} else {
mask = 1 << (irq - 16);
cache_804 |= mask;
outl(cache_804, 0x804);
#elif NR_IRQS == 32
#ifdef CONFIG_ALPHA_MIKASA
} else {
mask = 1 << (irq & 15);
cache_536 |= mask;
outw(~cache_536, 0x536); /* note invert */
#else
} else {
mask = 1 << (irq & 7);
if (irq < 24) {
......@@ -66,6 +83,7 @@ static void mask_irq(int irq)
cache_27 |= mask;
outb(cache_27, 0x27);
}
#endif
#endif
}
}
......@@ -83,12 +101,23 @@ static void unmask_irq(unsigned long irq)
cache_A1 &= mask;
outb(cache_A1, 0xA1);
}
#if NR_IRQS == 33
#if NR_IRQS == 48
} else {
mask = ~(1 << (irq - 16));
cache_irq_mask &= mask;
*(unsigned int *)GRU_INT_MASK = ~cache_irq_mask; /* invert */
#elif NR_IRQS == 33
} else {
mask = ~(1 << (irq - 16));
cache_804 &= mask;
outl(cache_804, 0x804);
#elif NR_IRQS == 32
#ifdef CONFIG_ALPHA_MIKASA
} else {
mask = ~(1 << (irq & 15));
cache_536 &= mask;
outw(~cache_536, 0x536); /* note invert */
#else
} else {
mask = ~(1 << (irq & 7));
......@@ -99,6 +128,7 @@ static void unmask_irq(unsigned long irq)
cache_27 &= mask;
outb(cache_27, 0x27);
}
#endif
#endif
}
}
......@@ -115,9 +145,8 @@ void disable_irq(unsigned int irq_nr)
void enable_irq(unsigned int irq_nr)
{
unsigned long flags, mask;
unsigned long flags;
mask = ~(1 << (irq_nr & 7));
save_flags(flags);
cli();
unmask_irq(irq_nr);
......@@ -199,7 +228,8 @@ int request_irq(unsigned int irq,
shared = 1;
}
action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
GFP_KERNEL);
if (!action)
return -ENOMEM;
......@@ -352,8 +382,8 @@ static inline void isa_device_interrupt(unsigned long vector,
# define IACK_SC APECS_IACK_SC
#elif defined(CONFIG_ALPHA_LCA)
# define IACK_SC LCA_IACK_SC
#elif defined(CONFIG_ALPHA_ALCOR)
# define IACK_SC ALCOR_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC CIA_IACK_SC
#else
/*
* This is bogus but necessary to get it to compile
......@@ -559,11 +589,17 @@ unsigned long probe_irq_on(void)
/* now filter out any obviously spurious interrupts */
irqmask = (((unsigned long)cache_A1)<<8) | (unsigned long) cache_21;
#if NR_IRQS == 33
#if NR_IRQS == 48
irqmask |= (unsigned long) cache_irq_mask << 16;
#elif NR_IRQS == 33
irqmask |= (unsigned long) cache_804 << 16;
#elif NR_IRQS == 32
#ifdef CONFIG_ALPHA_MIKASA
irqmask |= (unsigned long) cache_536 << 16;
#else
irqmask |= ((((unsigned long)cache_26)<<16) |
(((unsigned long)cache_27)<<24));
#endif
#endif
irqs &= ~irqmask;
return irqs;
......@@ -580,11 +616,17 @@ int probe_irq_off(unsigned long irqs)
int i;
irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21;
#if NR_IRQS == 33
#if NR_IRQS == 48
irqmask |= (unsigned long) cache_irq_mask << 16;
#elif NR_IRQS == 33
irqmask |= (unsigned long) cache_804 << 16;
#elif NR_IRQS == 32
#ifdef CONFIG_ALPHA_MIKASA
irqmask |= (unsigned long) cache_536 << 16;
#else
irqmask |= ((((unsigned long)cache_26)<<16) |
(((unsigned long)cache_27)<<24));
#endif
#endif
irqs &= irqmask & ~1; /* always mask out irq 0---it's the unused timer */
#ifdef CONFIG_ALPHA_P2K
......@@ -608,10 +650,10 @@ static void machine_check(unsigned long vector, unsigned long la, struct pt_regs
extern void apecs_machine_check(unsigned long vector, unsigned long la,
struct pt_regs * regs);
apecs_machine_check(vector, la, regs);
#elif defined(CONFIG_ALPHA_ALCOR)
extern void alcor_machine_check(unsigned long vector, unsigned long la,
#elif defined(CONFIG_ALPHA_CIA)
extern void cia_machine_check(unsigned long vector, unsigned long la,
struct pt_regs * regs);
alcor_machine_check(vector, la, regs);
cia_machine_check(vector, la, regs);
#else
printk("Machine check\n");
#endif
......@@ -638,7 +680,11 @@ asmlinkage void do_entInt(unsigned long type, unsigned long vector, unsigned lon
#elif NR_IRQS == 33
cabriolet_and_eb66p_device_interrupt(vector, &regs);
#elif NR_IRQS == 32
#ifdef CONFIG_ALPHA_MIKASA
# error we got a problem here Charlie MIKASA should be SRM console
#else
eb66_and_eb64p_device_interrupt(vector, &regs);
#endif
#elif NR_IRQS == 16
isa_device_interrupt(vector, &regs);
#endif
......@@ -661,11 +707,17 @@ void init_IRQ(void)
dma_outb(0, DMA2_RESET_REG);
dma_outb(0, DMA1_CLR_MASK_REG);
dma_outb(0, DMA2_CLR_MASK_REG);
#if NR_IRQS == 33
#if NR_IRQS == 48
*(unsigned int *)GRU_INT_MASK = ~cache_irq_mask; /* invert */
#elif NR_IRQS == 33
outl(cache_804, 0x804);
#elif NR_IRQS == 32
#ifdef CONFIG_ALPHA_MIKASA
outw(~cache_536, 0x536); /* note invert */
#else
outb(cache_26, 0x26);
outb(cache_27, 0x27);
#endif
#endif
enable_irq(2); /* enable cascade */
}
......@@ -153,8 +153,8 @@ void setup_arch(char **cmdline_p,
*memory_start_p = lca_init(*memory_start_p, *memory_end_p);
#elif defined(CONFIG_ALPHA_APECS)
*memory_start_p = apecs_init(*memory_start_p, *memory_end_p);
#elif defined(CONFIG_ALPHA_ALCOR)
*memory_start_p = alcor_init(*memory_start_p, *memory_end_p);
#elif defined(CONFIG_ALPHA_CIA)
*memory_start_p = cia_init(*memory_start_p, *memory_end_p);
#endif
}
......
......@@ -92,10 +92,12 @@ void unplug_device(void * data)
save_flags(flags);
cli();
if (dev->current_request == &dev->plug) {
dev->current_request = dev->plug.next;
dev->plug.next = NULL;
if (dev->current_request)
struct request * next = dev->plug.next;
dev->current_request = next;
if (next) {
dev->plug.next = NULL;
(dev->request_fn)();
}
}
restore_flags(flags);
}
......
......@@ -53,6 +53,10 @@
copied from Werner Zimmermann, who copied it
from Heiko Schlittermann's mcdx.
17-1-96 v0.6 Multisession works; some cleanup too.
18-4-96 v0.7 Increased some timing constants;
thanks to Luke McFarlane. Also tidied up some
printk behaviour. ISP16 initialization
is now handled by a separate driver.
*/
/* Includes */
......@@ -87,7 +91,7 @@ static void debug(int debug_this, const char* fmt, ...)
va_start(args, fmt);
vsprintf(s, fmt, args);
printk("optcd: %s\n", s);
printk(KERN_DEBUG "optcd: %s\n", s);
va_end(args);
}
#else
......@@ -732,7 +736,7 @@ static struct cdrom_subchnl toc[MAX_TRACKS];
#if DEBUG_TOC
void toc_debug_info(int i)
{
printk("#%3d ctl %1x, adr %1x, track %2d index %3d"
printk(KERN_DEBUG "#%3d ctl %1x, adr %1x, track %2d index %3d"
" %2d:%02d.%02d %2d:%02d.%02d\n",
i, toc[i].cdsc_ctrl, toc[i].cdsc_adr,
toc[i].cdsc_trk, toc[i].cdsc_ind,
......@@ -951,7 +955,7 @@ static int update_toc(void)
get_multi_disk_info(); /* Here disk_info.multi is set */
#endif MULTISESSION
if (disk_info.multi)
printk("optcd: Multisession support experimental, "
printk(KERN_WARNING "optcd: Multisession support experimental, "
"see linux/Documentation/cdrom/optcd\n");
DEBUG((DEBUG_TOC, "exiting update_toc"));
......@@ -992,7 +996,7 @@ inline static void opt_invalidate_buffers(void)
static void transfer(void)
{
#if DEBUG_BUFFERS | DEBUG_REQUEST
printk("optcd: executing transfer\n");
printk(KERN_DEBUG "optcd: executing transfer\n");
#endif
if (!CURRENT_VALID)
......@@ -1078,11 +1082,11 @@ static void poll(void)
int skip = 0;
if (error) {
printk("optcd: I/O error 0x%02x\n", error);
printk(KERN_ERR "optcd: I/O error 0x%02x\n", error);
opt_invalidate_buffers();
if (!tries--) {
printk("optcd: read block %d failed; Giving up\n",
next_bn);
printk(KERN_ERR "optcd: read block %d failed;"
" Giving up\n", next_bn);
if (transfer_is_active)
loop_again = 0;
if (CURRENT_VALID)
......@@ -1104,9 +1108,9 @@ static void poll(void)
else {
state_old = state;
if (++state_n > 1)
printk("optcd: %ld times in previous state\n",
state_n);
printk("optcd: state %d\n", state);
printk(KERN_DEBUG "optcd: %ld times "
"in previous state\n", state_n);
printk(KERN_DEBUG "optcd: state %d\n", state);
state_n = 0;
}
#endif
......@@ -1141,9 +1145,10 @@ static void poll(void)
if ((status & ST_DOOR_OPEN) || (status & ST_DRVERR)) {
toc_uptodate = 0;
opt_invalidate_buffers();
printk((status & ST_DOOR_OPEN)
? "optcd: door open\n"
: "optcd: disk removed\n");
printk(KERN_WARNING "optcd: %s\n",
(status & ST_DOOR_OPEN)
? "door open"
: "disk removed");
state = S_IDLE;
while (CURRENT_VALID)
end_request(0);
......@@ -1184,16 +1189,17 @@ static void poll(void)
#if DEBUG_STATE
if (flags != flags_old) {
flags_old = flags;
printk("optcd: flags:%x\n", flags);
printk(KERN_DEBUG "optcd: flags:%x\n", flags);
}
if (flags == FL_STEN)
printk("timeout cnt: %d\n", timeout);
printk(KERN_DEBUG "timeout cnt: %d\n", timeout);
#endif
switch (flags) {
case FL_DTEN: /* only STEN low */
if (!tries--) {
printk("optcd: read block %d failed; "
printk(KERN_ERR
"optcd: read block %d failed; "
"Giving up\n", next_bn);
if (transfer_is_active) {
tries = 0;
......@@ -1216,19 +1222,22 @@ static void poll(void)
break;
}
if (read_count<=0)
printk("optcd: warning - try to read"
printk(KERN_WARNING
"optcd: warning - try to read"
" 0 frames\n");
while (read_count) {
buf_bn[buf_in] = NOBUF;
if (!flag_low(FL_DTEN, BUSY_TIMEOUT)) {
/* should be no waiting here!?? */
printk("read_count:%d "
printk(KERN_ERR
"read_count:%d "
"CURRENT->nr_sectors:%ld "
"buf_in:%d\n",
read_count,
CURRENT->nr_sectors,
buf_in);
printk("transfer active: %x\n",
printk(KERN_ERR
"transfer active: %x\n",
transfer_is_active);
read_count = 0;
state = S_STOP;
......@@ -1287,7 +1296,8 @@ static void poll(void)
break;
case S_STOP:
if (read_count != 0)
printk("optcd: discard data=%x frames\n",
printk(KERN_ERR
"optcd: discard data=%x frames\n",
read_count);
flush_data();
if (send_cmd(COMDRVST)) {
......@@ -1323,13 +1333,13 @@ static void poll(void)
}
break;
default:
printk("optcd: invalid state %d\n", state);
printk(KERN_ERR "optcd: invalid state %d\n", state);
return;
} /* case */
} /* while */
if (!timeout--) {
printk("optcd: timeout in state %d\n", state);
printk(KERN_ERR "optcd: timeout in state %d\n", state);
state = S_STOP;
if (exec_cmd(COMSTOP) < 0) {
state = S_IDLE;
......@@ -1349,7 +1359,7 @@ static void do_optcd_request(void)
CURRENT -> sector, CURRENT -> nr_sectors));
if (disk_info.audio) {
printk("optcd: Error: tried to mount an Audio CD\n");
printk(KERN_WARNING "optcd: tried to mount an Audio CD\n");
end_request(0);
return;
}
......@@ -1521,9 +1531,6 @@ static int cdromreadtocentry(unsigned long arg)
struct cdrom_tocentry entry;
struct cdrom_subchnl *tocptr;
status = verify_area(VERIFY_READ, (void *) arg, sizeof entry);
if (status)
return status;
status = verify_area(VERIFY_WRITE, (void *) arg, sizeof entry);
if (status)
return status;
......@@ -1586,9 +1593,6 @@ static int cdromsubchnl(unsigned long arg)
int status;
struct cdrom_subchnl subchnl;
status = verify_area(VERIFY_READ, (void *) arg, sizeof subchnl);
if (status)
return status;
status = verify_area(VERIFY_WRITE, (void *) arg, sizeof subchnl);
if (status)
return status;
......@@ -1615,9 +1619,6 @@ static int cdromread(unsigned long arg, int blocksize, int cmd)
struct cdrom_msf msf;
char buf[CD_FRAMESIZE_RAWER];
status = verify_area(VERIFY_READ, (void *) arg, sizeof msf);
if (status)
return status;
status = verify_area(VERIFY_WRITE, (void *) arg, blocksize);
if (status)
return status;
......@@ -1667,9 +1668,6 @@ static int cdrommultisession(unsigned long arg)
int status;
struct cdrom_multisession ms;
status = verify_area(VERIFY_READ, (void*) arg, sizeof ms);
if (status)
return status;
status = verify_area(VERIFY_WRITE, (void*) arg, sizeof ms);
if (status)
return status;
......@@ -1692,13 +1690,15 @@ static int cdrommultisession(unsigned long arg)
#if DEBUG_MULTIS
if (ms.addr_format == CDROM_MSF)
printk("optcd: multisession xa:%d, msf:%02d:%02d.%02d\n",
printk(KERN_DEBUG
"optcd: multisession xa:%d, msf:%02d:%02d.%02d\n",
ms.xa_flag,
ms.addr.msf.minute,
ms.addr.msf.second,
ms.addr.msf.frame);
else
printk("optcd: multisession %d, lba:0x%08x [%02d:%02d.%02d])\n",
printk(KERN_DEBUG
"optcd: multisession %d, lba:0x%08x [%02d:%02d.%02d])\n",
ms.xa_flag,
ms.addr.lba,
disk_info.last_session.minute,
......@@ -1884,7 +1884,7 @@ static int opt_open(struct inode *ip, struct file *fp)
}
DEBUG((DEBUG_VFS, "status: %02x", status));
if ((status & ST_DOOR_OPEN) || (status & ST_DRVERR)) {
printk("optcd: no disk or door open\n");
printk(KERN_INFO "optcd: no disk or door open\n");
return -EIO;
}
status = exec_cmd(COMLOCK); /* Lock door */
......@@ -1984,7 +1984,7 @@ static int version_ok(void)
if (ch < 0)
return 0;
printk("optcd: Device %s detected\n", devname);
printk(KERN_INFO "optcd: Device %s detected\n", devname);
return ((devname[0] == 'D')
&& (devname[1] == 'O')
&& (devname[2] == 'L')
......@@ -2026,32 +2026,33 @@ int optcd_init(void)
int status;
if (optcd_port <= 0) {
printk("optcd: no Optics Storage CDROM Initialization\n");
printk(KERN_INFO
"optcd: no Optics Storage CDROM Initialization\n");
return -EIO;
}
if (check_region(optcd_port, 4)) {
printk("optcd: conflict, I/O port 0x%x already used\n",
printk(KERN_ERR "optcd: conflict, I/O port 0x%x already used\n",
optcd_port);
return -EIO;
}
if (!reset_drive()) {
printk("optcd: drive at 0x%x not ready\n", optcd_port);
printk(KERN_ERR "optcd: drive at 0x%x not ready\n", optcd_port);
return -EIO;
}
if (!version_ok()) {
printk("optcd: unknown drive detected; aborting\n");
printk(KERN_ERR "optcd: unknown drive detected; aborting\n");
return -EIO;
}
status = exec_cmd(COMINITDOUBLE);
if (status < 0) {
printk("optcd: cannot init double speed mode\n");
printk(KERN_ERR "optcd: cannot init double speed mode\n");
DEBUG((DEBUG_VFS, "exec_cmd COMINITDOUBLE: %02x", -status));
return -EIO;
}
if (register_blkdev(MAJOR_NR, "optcd", &opt_fops) != 0)
{
printk("optcd: unable to get major %d\n", MAJOR_NR);
printk(KERN_ERR "optcd: unable to get major %d\n", MAJOR_NR);
return -EIO;
}
......@@ -2074,7 +2075,7 @@ int init_module(void)
void cleanup_module(void)
{
if (unregister_blkdev(MAJOR_NR, "optcd") == -EINVAL) {
printk("optcd: what's that: can't unregister\n");
printk(KERN_ERR "optcd: what's that: can't unregister\n");
return;
}
release_region(optcd_port, 4);
......
......@@ -15,6 +15,10 @@
*
* 03/96: Modularised by Angelo Haritsis <ah@doc.ic.ac.uk>
*
* rs_set_termios fixed to look also for changes of the input
* flags INPCK, BRKINT, PARMRK, IGNPAR and IGNBRK.
* Bernd Anhdupl 05/17/96.
*
* This module exports the following rs232 io functions:
*
* int rs_init(void);
......@@ -405,7 +409,9 @@ static _INLINE_ void receive_chars(struct async_struct *info,
break;
tty->flip.count++;
if (*status & (UART_LSR_BI)) {
#ifdef SERIAL_DEBUG_INTR
printk("handling break....");
#endif
*tty->flip.flag_buf_ptr++ = TTY_BREAK;
if (info->flags & ASYNC_SAK)
do_SAK(tty);
......@@ -1266,6 +1272,8 @@ static void change_speed(struct async_struct *info)
/*
* Set up parity check flag
*/
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (I_INPCK(info->tty))
info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
......@@ -1273,10 +1281,13 @@ static void change_speed(struct async_struct *info)
info->read_status_mask |= UART_LSR_BI;
info->ignore_status_mask = 0;
#if 0
/* This should be safe, but for some broken bits of hardware... */
if (I_IGNPAR(info->tty)) {
info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
info->read_status_mask |= UART_LSR_PE | UART_LSR_FE;
}
#endif
if (I_IGNBRK(info->tty)) {
info->ignore_status_mask |= UART_LSR_BI;
info->read_status_mask |= UART_LSR_BI;
......@@ -1285,11 +1296,12 @@ static void change_speed(struct async_struct *info)
* overruns too. (For real raw support).
*/
if (I_IGNPAR(info->tty)) {
info->ignore_status_mask |= UART_LSR_OE;
info->read_status_mask |= UART_LSR_OE;
info->ignore_status_mask |= UART_LSR_OE | \
UART_LSR_PE | UART_LSR_FE;
info->read_status_mask |= UART_LSR_OE | \
UART_LSR_PE | UART_LSR_FE;
}
}
cli();
serial_outp(info, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
serial_outp(info, UART_DLL, quot & 0xff); /* LS of divisor */
......@@ -2065,8 +2077,10 @@ static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios)
{
struct async_struct *info = (struct async_struct *)tty->driver_data;
if (tty->termios->c_cflag == old_termios->c_cflag)
return;
if ( (tty->termios->c_cflag == old_termios->c_cflag)
&& ( RELEVANT_IFLAG(tty->termios->c_iflag)
== RELEVANT_IFLAG(old_termios->c_iflag)))
return;
change_speed(info);
......
......@@ -205,6 +205,10 @@ set_palette (void)
{
int i, j;
if (console_blanked || vt_cons[fg_console]->vc_mode == KD_GRAPHICS)
return;
if (tga_type == 0) { /* 8-plane */
BT485_WRITE(0x00, BT485_ADDR_PAL_WRITE);
TGA_WRITE_REG(BT485_DATA_PAL, TGA_RAMDAC_SETUP_REG);
......
......@@ -5600,7 +5600,7 @@ NCR53c7xx_abort (Scsi_Cmnd *cmd) {
*/
int
NCR53c7xx_reset (Scsi_Cmnd *cmd) {
NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) {
NCR53c7x0_local_declare();
unsigned long flags;
int found = 0;
......
......@@ -59,7 +59,7 @@
extern int NCR53c7xx_abort(Scsi_Cmnd *);
extern int NCR53c7xx_detect(Scsi_Host_Template *tpnt);
extern int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
extern int NCR53c7xx_reset(Scsi_Cmnd *);
extern int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
#ifdef MODULE
extern int NCR53c7xx_release(struct Scsi_Host *);
#else
......
comment 'SCSI support type (disk, tape, CDrom)'
comment 'SCSI support type (disk, tape, CD-ROM)'
dep_tristate 'SCSI disk support' CONFIG_BLK_DEV_SD $CONFIG_SCSI
dep_tristate 'SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI
dep_tristate 'SCSI CDROM support' CONFIG_BLK_DEV_SR $CONFIG_SCSI
dep_tristate 'SCSI CD-ROM support' CONFIG_BLK_DEV_SR $CONFIG_SCSI
dep_tristate 'SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI
comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
......
......@@ -63,7 +63,7 @@ static const char *group_1_commands[] = {
static const char *group_2_commands[] = {
/* 40-41 */ "Change Definition", "Write Same",
/* 42-48 */ unknown, unknown, unknown, unknown, unknown, unknown, unknown,
/* 42-48 */ unknown, "Read TOC", unknown, unknown, unknown, unknown, unknown,
/* 49-4f */ unknown, unknown, unknown, "Log Select", "Log Sense", unknown, unknown,
/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
......@@ -101,7 +101,11 @@ static void print_opcode(int opcode) {
printk("%s(0x%02x) ", vendor, opcode);
break;
default:
printk("%s ",table[opcode & 0x1f]);
if (table[opcode & 0x1f] != unknown)
printk("%s ",table[opcode & 0x1f]);
else
printk("%s(0x%02x) ", unknown, opcode);
break;
}
}
#else /* CONST & CONST_COMMAND */
......
......@@ -257,9 +257,6 @@ static Scsi_Host_Template builtin_scsi_hosts[] =
#ifdef CONFIG_SCSI_QLOGIC_FAS
QLOGICFAS,
#endif
#ifdef CONFIG_SCSI_QLOGIC_ISP
QLOGICISP,
#endif
#ifdef CONFIG_SCSI_PAS16
MV_PAS16,
#endif
......@@ -290,6 +287,9 @@ static Scsi_Host_Template builtin_scsi_hosts[] =
#ifdef CONFIG_SCSI_AM53C974
AM53C974,
#endif
#ifdef CONFIG_SCSI_QLOGIC_ISP
QLOGICISP,
#endif
#ifdef CONFIG_SCSI_PPA
PPA,
#endif
......
......@@ -51,6 +51,7 @@
#include <unistd.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include "sd.h"
#include "hosts.h"
......@@ -80,6 +81,8 @@
#define DEBUG_ISP1020_INT 0
#define DEBUG_ISP1020_SETUP 0
#define DEFAULT_LOOP_COUNT 1000000
/* End Configuration section *************************************************/
#include <linux/module.h>
......@@ -106,6 +109,9 @@
#define ISP1020_REV_ID 1
#define MAX_TARGETS 16
#define MAX_LUNS 8
/* host configuration and control registers */
#define HOST_HCCR 0xc0 /* host command and control */
......@@ -113,7 +119,7 @@
#define PCI_ID_LOW 0x00 /* vendor id */
#define PCI_ID_HIGH 0x02 /* device id */
#define ISP_CFG0 0x04 /* configuration register #0 */
#define ISP_CFG1 0x08 /* configuration register #1 */
#define ISP_CFG1 0x06 /* configuration register #1 */
#define PCI_INTF_CTL 0x08 /* pci interface control */
#define PCI_INTF_STS 0x0a /* pci interface status */
#define PCI_SEMAPHORE 0x0c /* pci semaphore */
......@@ -138,7 +144,7 @@
/* async event status codes */
#define ASYNC_SCSI_BUS_RESET 0x8001
#define SYSTEM_ERROR 0x8002
#define REQUEST_TRANSFER ERROR 0x8003
#define REQUEST_TRANSFER_ERROR 0x8003
#define RESPONSE_TRANSFER_ERROR 0x8004
#define REQUEST_QUEUE_WAKEUP 0x8005
#define EXECUTION_TIMEOUT_RESET 0x8006
......@@ -164,13 +170,13 @@ struct Entry_header {
#define EFLAG_BAD_PAYLOAD 8
struct dataseg {
caddr_t d_base;
u_long d_count;
u_int d_base;
u_int d_count;
};
struct Command_Entry {
struct Entry_header hdr;
caddr_t handle;
u_int handle;
u_char target_lun;
u_char target_id;
u_short cdb_length;
......@@ -196,7 +202,7 @@ struct Command_Entry {
struct Ext_Command_Entry {
struct Entry_header hdr;
caddr_t handle;
u_int handle;
u_char target_lun;
u_char target_id;
u_short cdb_length;
......@@ -209,7 +215,7 @@ struct Ext_Command_Entry {
struct Continuation_Entry {
struct Entry_header hdr;
u_long reserved;
u_int reserved;
struct dataseg dataseg0;
struct dataseg dataseg1;
struct dataseg dataseg2;
......@@ -221,7 +227,7 @@ struct Continuation_Entry {
struct Marker_Entry {
struct Entry_header hdr;
caddr_t reserved;
u_int reserved;
u_char target_lun;
u_char target_id;
u_char modifier;
......@@ -236,14 +242,14 @@ struct Marker_Entry {
struct Status_Entry {
struct Entry_header hdr;
caddr_t handle;
u_int handle;
u_short scsi_status;
u_short completion_status;
u_short state_flags;
u_short status_flags;
u_short time;
u_short req_sense_len;
u_long residual;
u_int residual;
u_char rsvd[8];
u_char req_sense_data[32];
};
......@@ -290,6 +296,11 @@ struct Status_Entry {
#define STF_TIMEOUT 0x0040
#define STF_NEGOTIATION 0x0080
/* interface control commands */
#define ISP_RESET 0x0001
#define ISP_EN_INT 0x0002
#define ISP_EN_RISC 0x0004
/* host control commands */
#define HCCR_NOP 0x0000
#define HCCR_RESET 0x1000
......@@ -300,9 +311,11 @@ struct Status_Entry {
#define HCCR_CLEAR_HOST_INTR 0x6000
#define HCCR_CLEAR_RISC_INTR 0x7000
#define HCCR_BP_ENABLE 0x8000
#define HCCR_BIOS_ENABLE 0x9000
#define HCCR_BIOS_DISABLE 0x9000
#define HCCR_TEST_MODE 0xf000
#define RISC_BUSY 0x0004
/* mailbox commands */
#define MBOX_NO_OP 0x0000
#define MBOX_LOAD_RAM 0x0001
......@@ -467,6 +480,7 @@ struct dev_param {
u_short synchronous_period;
u_short synchronous_offset;
u_short device_enable;
u_short reserved; /* pad */
};
#define REQ_QUEUE_LEN 32
......@@ -474,9 +488,9 @@ struct dev_param {
#define QUEUE_ENTRY_LEN 64
struct isp1020_hostdata {
u_int io_base;
u_char irq;
u_char bus;
u_long io_base;
u_char revision;
u_char device_fn;
u_short res_queue_in_ptr;
......@@ -484,12 +498,12 @@ struct isp1020_hostdata {
u_short req_queue_in_ptr;
u_short req_queue_out_ptr;
struct host_param host_param;
struct dev_param dev_param[16];
struct dev_param dev_param[MAX_TARGETS];
char res_queue[RES_QUEUE_LEN][QUEUE_ENTRY_LEN];
char req_queue[REQ_QUEUE_LEN][QUEUE_ENTRY_LEN];
};
struct isp1020_hostdata *irq2host[16];
struct isp1020_hostdata *irq2host[NR_IRQS];
void isp1020_enable_irqs(struct isp1020_hostdata *);
void isp1020_disable_irqs(struct isp1020_hostdata *);
......@@ -505,8 +519,12 @@ void isp1020_print_status_entry(struct Status_Entry *);
void isp1020_print_scsi_cmd(Scsi_Cmnd *);
void isp1020_scsi_done(Scsi_Cmnd *);
int isp1020_return_status(struct Status_Entry *);
void isp1020_intr_handler(int, struct pt_regs *);
void isp1020_intr_handler(int, void *, struct pt_regs *);
struct proc_dir_entry proc_scsi_isp1020 = {
PROC_SCSI_QLOGICISP, 7, "isp1020",
S_IFDIR | S_IRUGO | S_IXUGO, 2
};
int isp1020_detect(Scsi_Host_Template *tmpt)
{
......@@ -518,6 +536,8 @@ int isp1020_detect(Scsi_Host_Template *tmpt)
ENTER("isp1020_detect");
tmpt->proc_dir = &proc_scsi_isp1020;
if (pcibios_present() == 0) {
printk("qlogicisp : PCI bios not present\n");
return 0;
......@@ -549,7 +569,7 @@ int isp1020_detect(Scsi_Host_Template *tmpt)
scsihost->this_id = hostdata->host_param.initiator_scsi_id;
if (request_irq(hostdata->irq, isp1020_intr_handler, 0,
if (request_irq(hostdata->irq, isp1020_intr_handler, SA_INTERRUPT,
"qlogicisp", NULL)) {
printk("qlogicisp : interrupt %d already in use\n", hostdata->irq);
scsi_unregister(scsihost);
......@@ -557,7 +577,7 @@ int isp1020_detect(Scsi_Host_Template *tmpt)
}
if (check_region(hostdata->io_base, 0xff)) {
printk("qlogicisp : i/o region 0x%04lx-0x%04lx already in use\n",
printk("qlogicisp : i/o region 0x%04x-0x%04x already in use\n",
hostdata->io_base, hostdata->io_base + 0xff);
free_irq(hostdata->irq, NULL);
scsi_unregister(scsihost);
......@@ -567,6 +587,8 @@ int isp1020_detect(Scsi_Host_Template *tmpt)
request_region(hostdata->io_base, 0xff, "qlogicisp");
irq2host[hostdata->irq] = hostdata;
outw(0x0, hostdata->io_base + PCI_SEMAPHORE);
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
isp1020_enable_irqs(hostdata);
hosts++;
......@@ -605,8 +627,9 @@ const char *isp1020_info(struct Scsi_Host *host)
ENTER("isp1020_info");
hostdata = (struct isp1020_hostdata *) host->hostdata;
sprintf(buf, "QLogic ISP1020 SCSI on PCI bus %d, device %d, irq %d",
hostdata->bus, (hostdata->device_fn & 0xf8) >> 3, hostdata->irq);
sprintf(buf, "QLogic ISP1020 SCSI on PCI bus %d device %d irq %d base 0x%x",
hostdata->bus, (hostdata->device_fn & 0xf8) >> 3, hostdata->irq,
hostdata->io_base);
LEAVE("isp1020_info");
......@@ -627,6 +650,7 @@ int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (* done)(Scsi_Cmnd *))
struct scatterlist *sg;
struct Command_Entry *cmd;
struct isp1020_hostdata *hostdata;
unsigned long flags;
ENTER("isp1020_queuecommand");
......@@ -635,10 +659,14 @@ int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (* done)(Scsi_Cmnd *))
DEBUG(isp1020_print_scsi_cmd(Cmnd);)
save_flags(flags);
cli();
hostdata->req_queue_out_ptr = inw(hostdata->io_base + MBOX4);
if ((hostdata->req_queue_in_ptr + 1) % REQ_QUEUE_LEN ==
hostdata->req_queue_out_ptr) {
restore_flags(flags);
printk("qlogicisp : request queue overflow\n");
return 1;
}
......@@ -653,11 +681,12 @@ int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (* done)(Scsi_Cmnd *))
cmd->hdr.entry_type = ENTRY_COMMAND;
cmd->hdr.entry_cnt = 1;
cmd->handle = (caddr_t) Cmnd;
cmd->handle = (u_int) virt_to_bus(Cmnd);
cmd->target_lun = Cmnd->lun;
cmd->target_id = Cmnd->target;
cmd->cdb_length = Cmnd->cmd_len;
cmd->control_flags = CFLAG_READ | CFLAG_WRITE;
cmd->time_out = 30;
for (i = 0; i < Cmnd->cmd_len; i++)
cmd->cdb[i] = Cmnd->cmnd[i];
......@@ -668,13 +697,13 @@ int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (* done)(Scsi_Cmnd *))
iptr = (int *) &cmd->dataseg0.d_base;
for (i = 0; sg_count > 0; sg_count--, i++) {
*iptr++ = (int) sg[i].address;
*iptr++ = (int) virt_to_bus(sg[i].address);
*iptr++ = sg[i].length;
}
}
else {
cmd->dataseg0.d_base = (caddr_t) Cmnd->request_buffer;
cmd->dataseg0.d_count = (u_long) Cmnd->request_bufflen;
cmd->dataseg0.d_base = (u_int) virt_to_bus(Cmnd->request_buffer);
cmd->dataseg0.d_count = (u_int) Cmnd->request_bufflen;
cmd->segment_cnt = 1;
}
......@@ -683,6 +712,8 @@ int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (* done)(Scsi_Cmnd *))
outw(hostdata->req_queue_in_ptr, hostdata->io_base + MBOX4);
restore_flags(flags);
LEAVE("isp1020_queuecommand");
return 0;
......@@ -701,6 +732,7 @@ int isp1020_abort(Scsi_Cmnd *Cmnd)
u_short param[6];
struct isp1020_hostdata *hostdata;
int return_status = SCSI_ABORT_SUCCESS;
u_int cmdaddr = virt_to_bus(Cmnd);
ENTER("isp1020_abort");
......@@ -712,8 +744,8 @@ int isp1020_abort(Scsi_Cmnd *Cmnd)
param[0] = MBOX_ABORT;
param[1] = (((u_short) Cmnd->target) << 8) | Cmnd->lun;
param[2] = (u_long) Cmnd >> 16;
param[3] = (u_long) Cmnd & 0xffff;
param[2] = cmdaddr >> 16;
param[3] = cmdaddr & 0xffff;
isp1020_mbox_command(hostdata, param);
......@@ -730,7 +762,7 @@ int isp1020_abort(Scsi_Cmnd *Cmnd)
}
int isp1020_reset(Scsi_Cmnd *Cmnd)
int isp1020_reset(Scsi_Cmnd *Cmnd, unsigned int reset_flags)
{
u_short param[6];
struct isp1020_hostdata *hostdata;
......@@ -756,11 +788,11 @@ int isp1020_reset(Scsi_Cmnd *Cmnd)
LEAVE("isp1020_reset");
return SCSI_RESET_SUCCESS;
return return_status;;
}
int isp1020_biosparam(Disk *disk, int n, int ip[])
int isp1020_biosparam(Disk *disk, kdev_t n, int ip[])
{
int size = disk->capacity;
......@@ -786,16 +818,22 @@ int isp1020_biosparam(Disk *disk, int n, int ip[])
int isp1020_reset_hardware(struct isp1020_hostdata *hostdata)
{
u_short param[6];
int loop_count;
ENTER("isp1020_reset_hardware");
outw(0x0001, hostdata->io_base + PCI_INTF_CTL);
outw(ISP_RESET, hostdata->io_base + PCI_INTF_CTL);
outw(HCCR_RESET, hostdata->io_base + HOST_HCCR);
outw(HCCR_RELEASE, hostdata->io_base + HOST_HCCR);
outw(HCCR_BIOS_ENABLE, hostdata->io_base + HOST_HCCR);
outw(HCCR_BIOS_DISABLE, hostdata->io_base + HOST_HCCR);
while (inw(hostdata->io_base + MBOX0))
loop_count = DEFAULT_LOOP_COUNT;
while (--loop_count && inw(hostdata->io_base + HOST_HCCR) == RISC_BUSY)
barrier();
if (!loop_count)
printk("qlogicisp: reset_hardware loop timeout\n");
outw(0, hostdata->io_base + ISP_CFG1);
#if DEBUG_ISP1020
printk("qlogicisp : mbox 0 0x%04x \n", inw(hostdata->io_base + MBOX0));
......@@ -864,7 +902,7 @@ int isp1020_reset_hardware(struct isp1020_hostdata *hostdata)
int isp1020_init(struct Scsi_Host *sh)
{
u_long io_base;
u_int io_base;
struct isp1020_hostdata *hostdata;
u_char bus, device_fn, revision, irq;
u_short vendor_id, device_id, command;
......@@ -914,7 +952,7 @@ int isp1020_init(struct Scsi_Host *sh)
}
if (revision != ISP1020_REV_ID)
printk("qlogicisp : warning : new isp1020 revision id\n");
printk("qlogicisp : new isp1020 revision ID (%d)\n", revision);
if (inw(io_base + PCI_ID_LOW) != PCI_VENDOR_ID_QLOGIC
|| inw(io_base + PCI_ID_HIGH) != PCI_DEVICE_ID_QLOGIC_ISP1020) {
......@@ -941,7 +979,8 @@ int isp1020_get_defaults(struct isp1020_hostdata *hostdata)
if (!isp1020_verify_nvram(hostdata)) {
printk("qlogicisp : nvram checksum failure\n");
return 1;
printk("qlogicisp : attempting to use default parameters\n");
return isp1020_set_defaults(hostdata);
}
value = isp1020_read_nvram_word(hostdata, 2);
......@@ -999,7 +1038,7 @@ int isp1020_get_defaults(struct isp1020_hostdata *hostdata)
hostdata->host_param.max_queue_depth);
#endif /* DEBUG_ISP1020_SETUP */
for (i = 0; i < 16; i++) {
for (i = 0; i < MAX_TARGETS; i++) {
value = isp1020_read_nvram_word(hostdata, 14 + i * 3);
hostdata->dev_param[i].device_flags = value & 0xff;
......@@ -1042,7 +1081,7 @@ int isp1020_set_defaults(struct isp1020_hostdata *hostdata)
hostdata->host_param.initiator_scsi_id = 7;
hostdata->host_param.bus_reset_delay = 3;
hostdata->host_param.retry_count = 0;
hostdata->host_param.retry_delay = 0;
hostdata->host_param.retry_delay = 1;
hostdata->host_param.async_data_setup_time = 6;
hostdata->host_param.req_ack_active_negation = 1;
hostdata->host_param.data_line_active_negation = 1;
......@@ -1052,8 +1091,8 @@ int isp1020_set_defaults(struct isp1020_hostdata *hostdata)
hostdata->host_param.selection_timeout = 250;
hostdata->host_param.max_queue_depth = 256;
for (i = 0; i < 16; i++) {
hostdata->dev_param[i].device_flags = 0xc4;
for (i = 0; i < MAX_TARGETS; i++) {
hostdata->dev_param[i].device_flags = 0xfd;
hostdata->dev_param[i].execution_throttle = 16;
hostdata->dev_param[i].synchronous_period = 25;
hostdata->dev_param[i].synchronous_offset = 12;
......@@ -1069,12 +1108,16 @@ int isp1020_set_defaults(struct isp1020_hostdata *hostdata)
int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
{
int i, k;
u_long queue_addr;
u_int queue_addr;
u_short param[6];
u_short isp_cfg1;
unsigned long flags;
ENTER("isp1020_load_parameters");
save_flags(flags);
cli();
outw(hostdata->host_param.fifo_threshold, hostdata->io_base + ISP_CFG1);
param[0] = MBOX_SET_INIT_SCSI_ID;
......@@ -1083,6 +1126,7 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set initiator id failure\n");
return 1;
}
......@@ -1094,6 +1138,7 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set retry count failure\n");
return 1;
}
......@@ -1104,6 +1149,7 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : async data setup time failure\n");
return 1;
}
......@@ -1115,6 +1161,7 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set active negation state failure\n");
return 1;
}
......@@ -1126,6 +1173,7 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set pci control parameter failure\n");
return 1;
}
......@@ -1146,6 +1194,7 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set tag age limit failure\n");
return 1;
}
......@@ -1156,11 +1205,12 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set selection timeout failure\n");
return 1;
}
for (i = 0; i < 16; i++) {
for (i = 0; i < MAX_TARGETS; i++) {
if (!hostdata->dev_param[i].device_enable)
continue;
......@@ -1174,11 +1224,12 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set target parameter failure\n");
return 1;
}
for (k = 0; k < 8; k++) {
for (k = 0; k < MAX_LUNS; k++) {
param[0] = MBOX_SET_DEV_QUEUE_PARAMS;
param[1] = (i << 8) | k;
......@@ -1188,13 +1239,14 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set device queue parameter failure\n");
return 1;
}
}
}
queue_addr = (u_long) &hostdata->res_queue[0][0];
queue_addr = (u_int) virt_to_bus(&hostdata->res_queue[0][0]);
param[0] = MBOX_INIT_RES_QUEUE;
param[1] = RES_QUEUE_LEN;
......@@ -1206,11 +1258,12 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set response queue failure\n");
return 1;
}
queue_addr = (u_long) &hostdata->req_queue[0][0];
queue_addr = (u_int) virt_to_bus(&hostdata->req_queue[0][0]);
param[0] = MBOX_INIT_REQ_QUEUE;
param[1] = REQ_QUEUE_LEN;
......@@ -1221,23 +1274,35 @@ int isp1020_load_parameters(struct isp1020_hostdata *hostdata)
isp1020_mbox_command(hostdata, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
restore_flags(flags);
printk("qlogicisp : set request queue failure\n");
return 1;
}
restore_flags(flags);
LEAVE("isp1020_load_parameters");
return 0;
}
/*
* currently, this is only called during initialization or abort/reset,
* at which times interrupts are disabled, so polling is OK, I guess...
*/
int isp1020_mbox_command(struct isp1020_hostdata *hostdata, u_short param[])
{
int loop_count;
if (mbox_param[param[0]] == 0)
return 1;
while (inw(hostdata->io_base + HOST_HCCR) & 0x0080)
loop_count = DEFAULT_LOOP_COUNT;
while (--loop_count && inw(hostdata->io_base + HOST_HCCR) & 0x0080)
barrier();
if (!loop_count)
printk("qlogicisp: mbox_command loop timeout #1\n");
switch(mbox_param[param[0]] >> 4) {
case 6: outw(param[5], hostdata->io_base + MBOX5);
......@@ -1248,19 +1313,21 @@ int isp1020_mbox_command(struct isp1020_hostdata *hostdata, u_short param[])
case 1: outw(param[0], hostdata->io_base + MBOX0);
}
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
outw(0x0, hostdata->io_base + PCI_SEMAPHORE);
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
outw(HCCR_SET_HOST_INTR, hostdata->io_base + HOST_HCCR);
while (!(inw(hostdata->io_base + PCI_INTF_STS) & 0x04))
loop_count = DEFAULT_LOOP_COUNT;
while (--loop_count && !(inw(hostdata->io_base + PCI_INTF_STS) & 0x04))
barrier();
if (!loop_count)
printk("qlogicisp: mbox_command loop timeout #2\n");
while (inw(hostdata->io_base + MBOX0) == 0x04)
loop_count = DEFAULT_LOOP_COUNT;
while (--loop_count && inw(hostdata->io_base + MBOX0) == 0x04)
barrier();
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
outw(0x0, hostdata->io_base + PCI_SEMAPHORE);
if (!loop_count)
printk("qlogicisp: mbox_command loop timeout #3\n");
switch(mbox_param[param[0]] & 0xf) {
case 6: param[5] = inw(hostdata->io_base + MBOX5);
......@@ -1271,19 +1338,23 @@ int isp1020_mbox_command(struct isp1020_hostdata *hostdata, u_short param[])
case 1: param[0] = inw(hostdata->io_base + MBOX0);
}
outw(0x0, hostdata->io_base + PCI_SEMAPHORE);
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
return 0;
}
#define RESPONSE_QUEUE_UPDATE 0x01
#define MAILBOX_INTERRUPT 0x01
void isp1020_intr_handler(int irq, struct pt_regs *regs)
void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
Scsi_Cmnd *Cmnd;
struct Status_Entry *sts;
struct Marker_Entry *marker;
u_short status, add_marker = 0;
struct isp1020_hostdata *hostdata;
int loop_count;
ENTER_INTR("isp1020_intr_handler");
......@@ -1294,14 +1365,18 @@ void isp1020_intr_handler(int irq, struct pt_regs *regs)
DEBUG_INTR(printk("qlogicisp : interrupt on line %d\n", irq);)
while (!(inw(hostdata->io_base + PCI_INTF_STS) & 0x04))
loop_count = DEFAULT_LOOP_COUNT;
while (--loop_count && !(inw(hostdata->io_base + PCI_INTF_STS) & 0x04))
barrier();
if (!loop_count)
printk("qlogicisp: intr_handler loop timeout\n");
hostdata->res_queue_in_ptr = inw(hostdata->io_base + MBOX5);
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
status = inw(hostdata->io_base + PCI_SEMAPHORE);
if ((status & RESPONSE_QUEUE_UPDATE) == 0) {
if ((status & MAILBOX_INTERRUPT) == 0) {
hostdata->res_queue_in_ptr = inw(hostdata->io_base + MBOX5);
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
DEBUG_INTR(printk("qlogicisp : response queue update\n");)
DEBUG_INTR(printk("qlogicisp : response queue depth %d\n",
......@@ -1312,7 +1387,7 @@ void isp1020_intr_handler(int irq, struct pt_regs *regs)
sts = (struct Status_Entry *)
&hostdata->res_queue[hostdata->res_queue_out_ptr][0];
Cmnd = (Scsi_Cmnd *) sts->handle;
Cmnd = (Scsi_Cmnd *) bus_to_virt(sts->handle);
if (sts->completion_status == CS_RESET_OCCURRED
|| sts->completion_status == CS_ABORTED
......@@ -1321,7 +1396,7 @@ void isp1020_intr_handler(int irq, struct pt_regs *regs)
if (sts->state_flags & SF_GOT_SENSE)
memcpy(Cmnd->sense_buffer, sts->req_sense_data,
sizeof(Cmnd->sense_buffer));
sizeof(Cmnd->sense_buffer));
DEBUG_INTR(isp1020_print_status_entry(sts);)
......@@ -1340,10 +1415,7 @@ void isp1020_intr_handler(int irq, struct pt_regs *regs)
}
else {
DEBUG_INTR(printk("qlogicisp : mbox completion\n");)
status = inw(hostdata->io_base + MBOX0);
outw(0x0, hostdata->io_base + PCI_SEMAPHORE);
DEBUG_INTR(printk("qlogicisp : mbox completion status: %x\n", status);)
......@@ -1359,14 +1431,26 @@ void isp1020_intr_handler(int irq, struct pt_regs *regs)
printk("qlogicisp : bad mailbox return status\n");
break;
}
outw(0x0, hostdata->io_base + PCI_SEMAPHORE);
outw(HCCR_CLEAR_RISC_INTR, hostdata->io_base + HOST_HCCR);
}
if (add_marker) {
#if 0
unsigned long flags;
save_flags(flags);
cli();
#endif
DEBUG_INTR(printk("qlogicisp : adding marker entry\n");)
if ((hostdata->req_queue_in_ptr + 1) % REQ_QUEUE_LEN
== hostdata->req_queue_out_ptr) {
#if 0
restore_flags(flags);
#endif
printk("qlogicisp : request queue overflow\n");
return;
}
......@@ -1384,14 +1468,19 @@ void isp1020_intr_handler(int irq, struct pt_regs *regs)
% REQ_QUEUE_LEN;
outw(hostdata->req_queue_in_ptr, hostdata->io_base + MBOX4);
#if 0
restore_flags(flags);
#endif
}
isp1020_enable_irqs(hostdata);
LEAVE_INTR("isp1020_intr_handler");
}
#define NVRAM_DELAY() { \
int counter = 0; while (counter++ < 0xc8) barrier(); }
#define NVRAM_DELAY() udelay(2) /* 2 microsecond delay */
u_short isp1020_read_nvram_word(struct isp1020_hostdata *hostdata, u_short byte)
......@@ -1409,14 +1498,14 @@ u_short isp1020_read_nvram_word(struct isp1020_hostdata *hostdata, u_short byte)
}
for (i = 0xf, value = 0; i >= 0; i--) {
value = value << 0x1;
value <<= 1;
outw(0x3, hostdata->io_base + PCI_NVRAM); NVRAM_DELAY();
input = inw(hostdata->io_base + PCI_NVRAM); NVRAM_DELAY();
outw(0x2, hostdata->io_base + PCI_NVRAM); NVRAM_DELAY();
if (input & 0x8) value |= 0x1;
if (input & 0x8) value |= 1;
}
outw(0x0, hostdata->io_base + PCI_NVRAM);
outw(0x0, hostdata->io_base + PCI_NVRAM); NVRAM_DELAY();
return(value);
}
......@@ -1455,7 +1544,7 @@ int isp1020_verify_nvram(struct isp1020_hostdata *hostdata)
void isp1020_enable_irqs(struct isp1020_hostdata *hostdata)
{
outw(0x6, hostdata->io_base + PCI_INTF_CTL);
outw(ISP_EN_INT|ISP_EN_RISC, hostdata->io_base + PCI_INTF_CTL);
}
......@@ -1479,7 +1568,7 @@ void isp1020_print_status_entry(struct Status_Entry *status)
status->state_flags, status->status_flags);
printk("qlogicisp : time = 0x%04x, request sense length = 0x%04x\n",
status->time, status->req_sense_len);
printk("qlogicisp : residual transfer length = 0x%08lx\n", status->residual);
printk("qlogicisp : residual transfer length = 0x%08x\n", status->residual);
for (i = 0; i < status->req_sense_len; i++)
printk("qlogicisp : sense data = 0x%02x\n", status->req_sense_data[i]);
......
......@@ -48,13 +48,15 @@ int isp1020_release(struct Scsi_Host *);
const char * isp1020_info(struct Scsi_Host *);
int isp1020_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
int isp1020_abort(Scsi_Cmnd *);
int isp1020_reset(Scsi_Cmnd *);
int isp1020_biosparam(Disk *, int, int[]);
int isp1020_reset(Scsi_Cmnd *, unsigned int);
int isp1020_biosparam(Disk *, kdev_t, int[]);
#ifndef NULL
#define NULL (0)
#endif
extern struct proc_dir_entry proc_scsi_isp1020;
#define QLOGICISP { \
/* next */ NULL, \
/* usage_count */ NULL, \
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -22,7 +22,7 @@
#include <scsi/scsi_ioctl.h>
#define MAX_RETRIES 5
#define MAX_TIMEOUT 900
#define MAX_TIMEOUT (9 * HZ)
#define MAX_BUF 4096
#define max(a,b) (((a) > (b)) ? (a) : (b))
......
......@@ -218,11 +218,45 @@ static void rw_intr (Scsi_Cmnd *SCpnt)
{
int result = SCpnt->result;
int this_count = SCpnt->bufflen >> 9;
int good_sectors = (result == 0 ? this_count : 0);
int block_sectors = 1;
#ifdef DEBUG
printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
SCpnt->host->host_no, result);
#endif
/*
Handle MEDIUM ERRORs that indicate partial success. Since this is a
relatively rare error condition, no care is taken to avoid unnecessary
additional work such as memcpy's that could be avoided.
*/
if (driver_byte(result) != 0 && /* An error occurred */
SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
SCpnt->sense_buffer[2] == MEDIUM_ERROR)
{
long error_sector = (SCpnt->sense_buffer[3] << 24) |
(SCpnt->sense_buffer[4] << 16) |
(SCpnt->sense_buffer[5] << 8) |
SCpnt->sense_buffer[6];
int sector_size =
rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size;
if (SCpnt->request.bh != NULL)
block_sectors = SCpnt->request.bh->b_size >> 9;
if (sector_size == 1024)
{
error_sector <<= 1;
if (block_sectors < 2) block_sectors = 2;
}
else if (sector_size == 256)
error_sector >>= 1;
error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect;
error_sector &= ~ (block_sectors - 1);
good_sectors = error_sector - SCpnt->request.sector;
if (good_sectors < 0 || good_sectors >= this_count)
good_sectors = 0;
}
/*
* First case : we assume that the command succeeded. One of two things
......@@ -230,7 +264,7 @@ static void rw_intr (Scsi_Cmnd *SCpnt)
* sectors that we were unable to read last time.
*/
if (!result) {
if (good_sectors > 0) {
#ifdef DEBUG
printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
......@@ -291,11 +325,16 @@ static void rw_intr (Scsi_Cmnd *SCpnt)
SCpnt->request.sector, this_count);
}
}
SCpnt = end_scsi_request(SCpnt, 1, this_count);
requeue_sd_request(SCpnt);
return;
SCpnt = end_scsi_request(SCpnt, 1, good_sectors);
if (result == 0)
{
requeue_sd_request(SCpnt);
return;
}
}
if (good_sectors == 0) {
/* Free up any indirection buffers we allocated for DMA purposes. */
if (SCpnt->use_sg) {
struct scatterlist * sgpnt;
......@@ -319,7 +358,8 @@ static void rw_intr (Scsi_Cmnd *SCpnt)
if (SCpnt->buffer != SCpnt->request.buffer)
scsi_free(SCpnt->buffer, SCpnt->bufflen);
}
}
/*
* Now, if we were good little boys and girls, Santa left us a request
* sense buffer. We can extract information from this, so we
......@@ -382,6 +422,17 @@ static void rw_intr (Scsi_Cmnd *SCpnt)
/* ???? */
}
}
if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ",
SCpnt->host->host_no, (int) SCpnt->channel,
(int) SCpnt->target, (int) SCpnt->lun);
print_command(SCpnt->cmnd);
print_sense("sr", SCpnt);
SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
requeue_sd_request(SCpnt);
return;
}
} /* driver byte != 0 */
if (result) {
printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
......
......@@ -40,7 +40,7 @@
#include "constants.h"
#define MAX_RETRIES 3
#define SR_TIMEOUT (150 * HZ)
#define SR_TIMEOUT (15 * HZ)
static int sr_init(void);
static void sr_finish(void);
......@@ -152,12 +152,50 @@ static void rw_intr (Scsi_Cmnd * SCpnt)
{
int result = SCpnt->result;
int this_count = SCpnt->this_count;
int good_sectors = (result == 0 ? this_count : 0);
int block_sectors = 0;
#ifdef DEBUG
printk("sr.c done: %x %x\n",result, SCpnt->request.bh->b_data);
#endif
if (!result)
{ /* No error */
/*
Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial success.
Since this is a relatively rare error condition, no care is taken to
avoid unnecessary additional work such as memcpy's that could be avoided.
*/
if (driver_byte(result) != 0 && /* An error occurred */
SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
(SCpnt->sense_buffer[2] == MEDIUM_ERROR ||
SCpnt->sense_buffer[2] == VOLUME_OVERFLOW))
{
long error_sector = (SCpnt->sense_buffer[3] << 24) |
(SCpnt->sense_buffer[4] << 16) |
(SCpnt->sense_buffer[5] << 8) |
SCpnt->sense_buffer[6];
int device_nr = DEVICE_NR(SCpnt->request.rq_dev);
if (SCpnt->request.bh != NULL)
block_sectors = SCpnt->request.bh->b_size >> 9;
if (block_sectors < 4) block_sectors = 4;
if (scsi_CDs[device_nr].sector_size == 2048)
error_sector <<= 2;
error_sector &= ~ (block_sectors - 1);
good_sectors = error_sector - SCpnt->request.sector;
if (good_sectors < 0 || good_sectors >= this_count)
good_sectors = 0;
/*
The SCSI specification allows for the value returned by READ
CAPACITY to be up to 75 2K sectors past the last readable
block. Therefore, if we hit a medium error within the last
75 2K sectors, we decrease the saved size value.
*/
if ((error_sector >> 1) < sr_sizes[device_nr] &&
scsi_CDs[device_nr].capacity - error_sector < 4*75)
sr_sizes[device_nr] = error_sector >> 1;
}
if (good_sectors > 0)
{ /* Some sectors were read successfully. */
if (SCpnt->use_sg == 0) {
if (SCpnt->buffer != SCpnt->request.buffer)
{
......@@ -165,20 +203,20 @@ static void rw_intr (Scsi_Cmnd * SCpnt)
offset = (SCpnt->request.sector % 4) << 9;
memcpy((char *)SCpnt->request.buffer,
(char *)SCpnt->buffer + offset,
this_count << 9);
good_sectors << 9);
/* Even though we are not using scatter-gather, we look
* ahead and see if there is a linked request for the
* other half of this buffer. If there is, then satisfy
* it. */
if((offset == 0) && this_count == 2 &&
SCpnt->request.nr_sectors > this_count &&
if((offset == 0) && good_sectors == 2 &&
SCpnt->request.nr_sectors > good_sectors &&
SCpnt->request.bh &&
SCpnt->request.bh->b_reqnext &&
SCpnt->request.bh->b_reqnext->b_size == 1024) {
memcpy((char *)SCpnt->request.bh->b_reqnext->b_data,
(char *)SCpnt->buffer + 1024,
1024);
this_count += 2;
good_sectors += 2;
};
scsi_free(SCpnt->buffer, 2048);
......@@ -196,15 +234,15 @@ static void rw_intr (Scsi_Cmnd * SCpnt)
};
};
scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
if(SCpnt->request.sector % 4) this_count -= 2;
if(SCpnt->request.sector % 4) good_sectors -= 2;
/* See if there is a padding record at the end that needs to be removed */
if(this_count > SCpnt->request.nr_sectors)
this_count -= 2;
if(good_sectors > SCpnt->request.nr_sectors)
good_sectors -= 2;
};
#ifdef DEBUG
printk("(%x %x %x) ",SCpnt->request.bh, SCpnt->request.nr_sectors,
this_count);
good_sectors);
#endif
if (SCpnt->request.nr_sectors > this_count)
{
......@@ -214,12 +252,16 @@ static void rw_intr (Scsi_Cmnd * SCpnt)
SCpnt->request.sector, this_count);
}
SCpnt = end_scsi_request(SCpnt, 1, this_count); /* All done */
requeue_sr_request(SCpnt);
return;
} /* Normal completion */
SCpnt = end_scsi_request(SCpnt, 1, good_sectors); /* All done */
if (result == 0)
{
requeue_sr_request(SCpnt);
return;
}
}
/* We only come through here if we have an error of some kind */
if (good_sectors == 0) {
/* We only come through here if no sectors were read successfully. */
/* Free up any indirection buffers we allocated for DMA purposes. */
if (SCpnt->use_sg) {
......@@ -229,14 +271,16 @@ static void rw_intr (Scsi_Cmnd * SCpnt)
for(i=0; i<SCpnt->use_sg; i++) {
if (sgpnt[i].alt_address) {
scsi_free(sgpnt[i].address, sgpnt[i].length);
};
};
}
}
scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
} else {
if (SCpnt->buffer != SCpnt->request.buffer)
scsi_free(SCpnt->buffer, SCpnt->bufflen);
};
}
}
if (driver_byte(result) != 0) {
if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
......@@ -267,13 +311,37 @@ static void rw_intr (Scsi_Cmnd * SCpnt)
}
}
if (SCpnt->sense_buffer[2] == NOT_READY) {
printk("CDROM not ready. Make sure you have a disc in the drive.\n");
printk("CD-ROM not ready. Make sure you have a disc in the drive.\n");
SCpnt = end_scsi_request(SCpnt, 0, this_count);
requeue_sr_request(SCpnt); /* Do next request */
return;
};
}
if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
printk("scsi%d: MEDIUM ERROR on "
"channel %d, id %d, lun %d, CDB: ",
SCpnt->host->host_no, (int) SCpnt->channel,
(int) SCpnt->target, (int) SCpnt->lun);
print_command(SCpnt->cmnd);
print_sense("sr", SCpnt);
SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
requeue_sr_request(SCpnt);
return;
}
if (SCpnt->sense_buffer[2] == VOLUME_OVERFLOW) {
printk("scsi%d: VOLUME OVERFLOW on "
"channel %d, id %d, lun %d, CDB: ",
SCpnt->host->host_no, (int) SCpnt->channel,
(int) SCpnt->target, (int) SCpnt->lun);
print_command(SCpnt->cmnd);
print_sense("sr", SCpnt);
SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
requeue_sr_request(SCpnt);
return;
}
}
/* We only get this far if we have an error we have not recognized */
......@@ -433,7 +501,7 @@ void sr_photocd(struct inode *inode)
memset(buf,0,40);
*((unsigned long*)buf) = 0;
*((unsigned long*)buf+1) = 12;
cmd[0] = 0x1a;
cmd[0] = MODE_SENSE;
cmd[2] = 1;
cmd[4] = 12;
rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
......@@ -454,7 +522,7 @@ void sr_photocd(struct inode *inode)
memset(buf,0,40);
*((unsigned long*)buf) = 12; /* sending 12 bytes... */
*((unsigned long*)buf+1) = 0;
cmd[0] = 0x15;
cmd[0] = MODE_SELECT;
cmd[1] = (1 << 4);
cmd[4] = 12;
send = &cmd[6]; /* this is a 6-Byte command */
......@@ -477,10 +545,11 @@ void sr_photocd(struct inode *inode)
#ifdef DEBUG
printk(KERN_DEBUG "sr_photocd: use SONY/PIONEER code\n");
#endif
get_sectorsize(MINOR(inode->i_rdev)); /* spinup (avoid timeout) */
memset(buf,0,40);
*((unsigned long*)buf) = 0x0; /* we send nothing... */
*((unsigned long*)buf+1) = 0x0c; /* and receive 0x0c bytes */
cmd[0] = 0x43; /* Read TOC */
cmd[0] = READ_TOC;
cmd[8] = 0x0c;
cmd[9] = 0x40;
rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
......@@ -488,11 +557,11 @@ void sr_photocd(struct inode *inode)
if (rc != 0) {
if (rc != 0x28000002) /* drop "not ready" */
printk(KERN_WARNING "sr_photocd: ioctl error (SONY): 0x%x\n",rc);
printk(KERN_WARNING "sr_photocd: ioctl error (SONY/PIONEER): 0x%x\n",rc);
break;
}
if ((rec[0] << 8) + rec[1] != 0x0a) {
printk(KERN_INFO "sr_photocd: (SONY) Hmm, seems the CDROM doesn't support multisession CD's\n");
printk(KERN_INFO "sr_photocd: (SONY/PIONEER) Hmm, seems the CDROM doesn't support multisession CD's\n");
no_multi = 1;
break;
}
......@@ -523,31 +592,31 @@ void sr_photocd(struct inode *inode)
static int sr_open(struct inode * inode, struct file * filp)
{
if(MINOR(inode->i_rdev) >= sr_template.nr_dev ||
!scsi_CDs[MINOR(inode->i_rdev)].device) return -ENXIO; /* No such device */
if (filp->f_mode & 2)
return -EROFS;
if(MINOR(inode->i_rdev) >= sr_template.nr_dev ||
!scsi_CDs[MINOR(inode->i_rdev)].device) return -ENXIO; /* No such device */
if (filp->f_mode & 2)
return -EROFS;
check_disk_change(inode->i_rdev);
if(!scsi_CDs[MINOR(inode->i_rdev)].device->access_count++)
if(!scsi_CDs[MINOR(inode->i_rdev)].device->access_count++)
sr_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
(*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)++;
if(sr_template.usage_count) (*sr_template.usage_count)++;
if(sr_template.usage_count) (*sr_template.usage_count)++;
sr_photocd(inode);
sr_photocd(inode);
/* If this device did not have media in the drive at boot time, then
* we would have been unable to get the sector size. Check to see if
* this is the case, and try again.
*/
if(scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size)
/* If this device did not have media in the drive at boot time, then
* we would have been unable to get the sector size. Check to see if
* this is the case, and try again.
*/
if(scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size)
get_sectorsize(MINOR(inode->i_rdev));
return 0;
return 0;
}
......@@ -1058,42 +1127,42 @@ static int sr_registered = 0;
static int sr_init()
{
int i;
if(sr_template.dev_noticed == 0) return 0;
if(!sr_registered) {
int i;
if(sr_template.dev_noticed == 0) return 0;
if(!sr_registered) {
if (register_blkdev(MAJOR_NR,"sr",&sr_fops)) {
printk("Unable to get major %d for SCSI-CD\n",MAJOR_NR);
return 1;
}
sr_registered++;
}
if (scsi_CDs) return 0;
sr_template.dev_max = sr_template.dev_noticed + SR_EXTRA_DEVS;
scsi_CDs = (Scsi_CD *) scsi_init_malloc(sr_template.dev_max * sizeof(Scsi_CD), GFP_ATOMIC);
memset(scsi_CDs, 0, sr_template.dev_max * sizeof(Scsi_CD));
sr_sizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC);
memset(sr_sizes, 0, sr_template.dev_max * sizeof(int));
}
sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
sizeof(int), GFP_ATOMIC);
for(i=0;i<sr_template.dev_max;i++) sr_blocksizes[i] = 2048;
blksize_size[MAJOR_NR] = sr_blocksizes;
return 0;
if (scsi_CDs) return 0;
sr_template.dev_max = sr_template.dev_noticed + SR_EXTRA_DEVS;
scsi_CDs = (Scsi_CD *) scsi_init_malloc(sr_template.dev_max * sizeof(Scsi_CD), GFP_ATOMIC);
memset(scsi_CDs, 0, sr_template.dev_max * sizeof(Scsi_CD));
sr_sizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC);
memset(sr_sizes, 0, sr_template.dev_max * sizeof(int));
sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
sizeof(int), GFP_ATOMIC);
for(i=0;i<sr_template.dev_max;i++) sr_blocksizes[i] = 2048;
blksize_size[MAJOR_NR] = sr_blocksizes;
return 0;
}
void sr_finish()
{
int i;
blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
blk_size[MAJOR_NR] = sr_sizes;
for (i = 0; i < sr_template.nr_dev; ++i)
blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
blk_size[MAJOR_NR] = sr_sizes;
for (i = 0; i < sr_template.nr_dev; ++i)
{
/* If we have already seen this, then skip it. Comes up
* with loadable modules. */
......@@ -1114,15 +1183,15 @@ void sr_finish()
}
/* If our host adapter is capable of scatter-gather, then we increase
* the read-ahead to 16 blocks (32 sectors). If not, we use
* a two block (4 sector) read ahead. */
if(scsi_CDs[0].device && scsi_CDs[0].device->host->sg_tablesize)
/* If our host adapter is capable of scatter-gather, then we increase
* the read-ahead to 16 blocks (32 sectors). If not, we use
* a two block (4 sector) read ahead. */
if(scsi_CDs[0].device && scsi_CDs[0].device->host->sg_tablesize)
read_ahead[MAJOR_NR] = 32; /* 32 sector read-ahead. Always removable. */
else
else
read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
return;
return;
}
static void sr_detach(Scsi_Device * SDp)
......
bool 'ProAudioSpectrum 16 support' CONFIG_PAS
bool 'SoundBlaster (SB, SBPro, SB16, clones) support' CONFIG_SB
bool 'Generic OPL2/OPL3 FM synthesizer support' CONFIG_ADLIB
bool 'Gravis Ultrasound support' CONFIG_GUS
bool 'MPU-401 support (NOT for SB16)' CONFIG_MPU401
bool '6850 UART Midi support' CONFIG_UART6850
bool 'PSS (ECHO-ADI2111) support' CONFIG_PSS
bool '16 bit sampling option of GUS (_NOT_ GUS MAX)' CONFIG_GUS16
bool 'GUS MAX support' CONFIG_GUSMAX
bool 'Microsoft Sound System support' CONFIG_MSS
bool 'Ensoniq Soundscape support' CONFIG_SSCAPE
bool 'MediaTriX AudioTriX Pro support' CONFIG_TRIX
bool 'Support for MAD16 and/or Mozart based cards' CONFIG_MAD16
bool 'Support for Crystal CS4232 based (PnP) cards' CONFIG_CS4232
bool 'Support for Turtle Beach Wave Front (Maui, Tropez) synthesizers' CONFIG_MAUI
bool '/dev/dsp and /dev/audio support' CONFIG_AUDIO
bool 'MIDI interface support' CONFIG_MIDI
bool 'FM synthesizer (YM3812/OPL-3) support' CONFIG_YM3812
bool 'Support for SM Wave' CONFIG_SMWAVE
if [ "$CONFIG_AEDSP16" = "y" ]; then
hex 'I/O base for Audio Excel DSP 16 220 or 240' AEDSP16_BASE 220
fi
if [ "$CONFIG_SB" = "y" ]; then
hex 'I/O base for SB Check from manual of the card' SBC_BASE 220
fi
if [ "$CONFIG_SB" = "y" ]; then
int 'SoundBlaster IRQ Check from manual of the card' SBC_IRQ 7
fi
if [ "$CONFIG_SB" = "y" ]; then
int 'SoundBlaster DMA 0, 1 or 3' SBC_DMA 1
fi
if [ "$CONFIG_SB" = "y" ]; then
int 'SoundBlaster 16 bit DMA (_REQUIRED_for SB16, Jazz16, SMW) 5, 6 or 7' SB_DMA2 5
fi
if [ "$CONFIG_SB" = "y" ]; then
hex 'MPU401 I/O base of SB16, Jazz16 and ES1688 Check from manual of the card' SB_MPU_BASE 0
fi
if [ "$CONFIG_SB" = "y" ]; then
int 'SB MPU401 IRQ (Jazz16, SM Wave and ES1688) Check from manual of the card' SB_MPU_IRQ -1
fi
if [ "$CONFIG_PAS" = "y" ]; then
int 'PAS16 IRQ 3, 4, 5, 7, 9, 10, 11, 12, 14 or 15' PAS_IRQ 10
fi
if [ "$CONFIG_PAS" = "y" ]; then
int 'PAS16 DMA 0, 1, 3, 5, 6 or 7' PAS_DMA 3
fi
if [ "$CONFIG_GUS" = "y" ]; then
hex 'I/O base for GUS 210, 220, 230, 240, 250 or 260' GUS_BASE 220
fi
if [ "$CONFIG_GUS" = "y" ]; then
int 'GUS IRQ 3, 5, 7, 9, 11, 12 or 15' GUS_IRQ 15
fi
if [ "$CONFIG_GUS" = "y" ]; then
int 'GUS DMA 1, 3, 5, 6 or 7' GUS_DMA 6
fi
if [ "$CONFIG_GUS" = "y" ]; then
int 'Second DMA channel for GUS 1, 3, 5, 6 or 7' GUS_DMA2 -1
fi
if [ "$CONFIG_GUS16" = "y" ]; then
hex 'I/O base for the 16 bit daughtercard of GUS 530, 604, E80 or F40' GUS16_BASE 530
fi
if [ "$CONFIG_GUS16" = "y" ]; then
int 'GUS 16 bit daughtercard IRQ 3, 4, 5, 7, or 9' GUS16_IRQ 7
fi
if [ "$CONFIG_GUS16" = "y" ]; then
int 'GUS DMA 0, 1 or 3' GUS16_DMA 3
fi
if [ "$CONFIG_MPU401" = "y" ]; then
hex 'I/O base for MPU401 Check from manual of the card' MPU_BASE 330
fi
if [ "$CONFIG_MPU401" = "y" ]; then
int 'MPU401 IRQ Check from manual of the card' MPU_IRQ 9
fi
if [ "$CONFIG_MAUI" = "y" ]; then
hex 'I/O base for Maui 210, 230, 260, 290, 300, 320, 338 or 330' MAUI_BASE 330
fi
if [ "$CONFIG_MAUI" = "y" ]; then
int 'Maui IRQ 5, 9, 12 or 15' MAUI_IRQ 9
fi
if [ "$CONFIG_UART6850" = "y" ]; then
hex 'I/O base for UART 6850 MIDI port (Unknown)' U6850_BASE 0
fi
if [ "$CONFIG_UART6850" = "y" ]; then
int 'UART6850 IRQ (Unknown)' U6850_IRQ -1
fi
if [ "$CONFIG_PSS" = "y" ]; then
hex 'PSS I/O base 220 or 240' PSS_BASE 220
fi
if [ "$CONFIG_PSS" = "y" ]; then
hex 'PSS audio I/O base 530, 604, E80 or F40' PSS_MSS_BASE 530
fi
if [ "$CONFIG_PSS" = "y" ]; then
int 'PSS audio IRQ 7, 9, 10 or 11' PSS_MSS_IRQ 11
fi
if [ "$CONFIG_PSS" = "y" ]; then
int 'PSS audio DMA 0, 1 or 3' PSS_MSS_DMA 3
fi
if [ "$CONFIG_PSS" = "y" ]; then
hex 'PSS MIDI I/O base ' PSS_MPU_BASE 330
fi
if [ "$CONFIG_PSS" = "y" ]; then
int 'PSS MIDI IRQ 3, 4, 5, 7 or 9' PSS_MPU_IRQ 9
fi
if [ "$CONFIG_MSS" = "y" ]; then
hex 'MSS/WSS I/O base 530, 604, E80 or F40' MSS_BASE 530
fi
if [ "$CONFIG_MSS" = "y" ]; then
int 'MSS/WSS IRQ 7, 9, 10 or 11' MSS_IRQ 11
fi
if [ "$CONFIG_MSS" = "y" ]; then
int 'MSS/WSS DMA 0, 1 or 3' MSS_DMA 3
fi
if [ "$CONFIG_SSCAPE" = "y" ]; then
hex 'Soundscape MIDI I/O base ' SSCAPE_BASE 330
fi
if [ "$CONFIG_SSCAPE" = "y" ]; then
int 'Soundscape MIDI IRQ ' SSCAPE_IRQ 9
fi
if [ "$CONFIG_SSCAPE" = "y" ]; then
int 'Soundscape initialization DMA 0, 1 or 3' SSCAPE_DMA 3
fi
if [ "$CONFIG_SSCAPE" = "y" ]; then
hex 'Soundscape audio I/O base 534, 608, E84 or F44' SSCAPE_MSS_BASE 534
fi
if [ "$CONFIG_SSCAPE" = "y" ]; then
int 'Soundscape audio IRQ 7, 9, 10 or 11' SSCAPE_MSS_IRQ 11
fi
if [ "$CONFIG_SSCAPE" = "y" ]; then
int 'Soundscape audio DMA 0, 1 or 3' SSCAPE_MSS_DMA 0
fi
if [ "$CONFIG_TRIX" = "y" ]; then
hex 'AudioTriX audio I/O base 530, 604, E80 or F40' TRIX_BASE 530
fi
if [ "$CONFIG_TRIX" = "y" ]; then
int 'AudioTriX audio IRQ 7, 9, 10 or 11' TRIX_IRQ 11
fi
if [ "$CONFIG_TRIX" = "y" ]; then
int 'AudioTriX audio DMA 0, 1 or 3' TRIX_DMA 0
fi
if [ "$CONFIG_TRIX" = "y" ]; then
int 'AudioTriX second (duplex) DMA 0, 1 or 3' TRIX_DMA2 3
fi
if [ "$CONFIG_TRIX" = "y" ]; then
hex 'AudioTriX MIDI I/O base 330, 370, 3B0 or 3F0' TRIX_MPU_BASE 330
fi
if [ "$CONFIG_TRIX" = "y" ]; then
int 'AudioTriX MIDI IRQ 3, 4, 5, 7 or 9' TRIX_MPU_IRQ 9
fi
if [ "$CONFIG_TRIX" = "y" ]; then
hex 'AudioTriX SB I/O base 220, 210, 230, 240, 250, 260 or 270' TRIX_SB_BASE 220
fi
if [ "$CONFIG_TRIX" = "y" ]; then
int 'AudioTriX SB IRQ 3, 4, 5 or 7' TRIX_SB_IRQ 7
fi
if [ "$CONFIG_TRIX" = "y" ]; then
int 'AudioTriX SB DMA 1 or 3' TRIX_SB_DMA 1
fi
if [ "$CONFIG_CS4232" = "y" ]; then
hex 'CS4232 audio I/O base 530, 604, E80 or F40' CS4232_BASE 530
fi
if [ "$CONFIG_CS4232" = "y" ]; then
int 'CS4232 audio IRQ 5, 7, 9, 11, 12 or 15' CS4232_IRQ 11
fi
if [ "$CONFIG_CS4232" = "y" ]; then
int 'CS4232 audio DMA 0, 1 or 3' CS4232_DMA 0
fi
if [ "$CONFIG_CS4232" = "y" ]; then
int 'CS4232 second (duplex) DMA 0, 1 or 3' CS4232_DMA2 3
fi
if [ "$CONFIG_CS4232" = "y" ]; then
hex 'CS4232 MIDI I/O base 330, 370, 3B0 or 3F0' CS4232_MPU_BASE 330
fi
if [ "$CONFIG_CS4232" = "y" ]; then
int 'CS4232 MIDI IRQ 5, 7, 9, 11, 12 or 15' CS4232_MPU_IRQ 9
fi
if [ "$CONFIG_MAD16" = "y" ]; then
hex 'MAD16 audio I/O base 530, 604, E80 or F40' MAD16_BASE 530
fi
if [ "$CONFIG_MAD16" = "y" ]; then
int 'MAD16 audio IRQ 7, 9, 10 or 11' MAD16_IRQ 11
fi
if [ "$CONFIG_MAD16" = "y" ]; then
int 'MAD16 audio DMA 0, 1 or 3' MAD16_DMA 3
fi
if [ "$CONFIG_MAD16" = "y" ]; then
int 'MAD16 second (duplex) DMA 0, 1 or 3' MAD16_DMA2 0
fi
if [ "$CONFIG_MAD16" = "y" ]; then
hex 'MAD16 MIDI I/O base 300, 310, 320 or 330 (0 disables)' MAD16_MPU_BASE 330
fi
if [ "$CONFIG_MAD16" = "y" ]; then
int 'MAD16 MIDI IRQ 5, 7, 9 or 10' MAD16_MPU_IRQ 9
fi
if [ "$CONFIG_AUDIO" = "y" ]; then
int 'Audio DMA buffer size 4096, 16384, 32768 or 65536' DSP_BUFFSIZE 65536
fi
#
$MAKE -C drivers/sound kernelconfig || exit 1
# Sound driver configuration
#
#--------
# There is another config script which is compatible with rest of
# the kernel. It can be activated by running 'make mkscript' in this
# directory. Please note that this is an _experimental_ feature which
# doesn't work with all cards (PSS, SM Wave, AudioTriX Pro).
#--------
#
$MAKE -C drivers/sound config || exit 1
......@@ -348,7 +348,7 @@ static inline void remove_from_lru_list(struct buffer_head * bh)
static inline void remove_from_free_list(struct buffer_head * bh)
{
int isize = BUFSIZE_INDEX(bh->b_size);
int isize = BUFSIZE_INDEX(bh->b_size);
if (!(bh->b_prev_free) || !(bh->b_next_free))
panic("VFS: Free block list corrupted");
if(bh->b_dev != B_FREE)
......@@ -369,7 +369,7 @@ static inline void remove_from_free_list(struct buffer_head * bh)
static inline void remove_from_queues(struct buffer_head * bh)
{
if(bh->b_dev == B_FREE) {
if(bh->b_dev == B_FREE) {
remove_from_free_list(bh); /* Free list entries should not be
in the hash queue */
return;
......@@ -410,7 +410,7 @@ static inline void put_last_lru(struct buffer_head * bh)
static inline void put_last_free(struct buffer_head * bh)
{
int isize;
int isize;
if (!bh)
return;
......@@ -432,7 +432,7 @@ static inline void put_last_free(struct buffer_head * bh)
static inline void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
if(bh->b_dev == B_FREE) {
if(bh->b_dev == B_FREE) {
put_last_free(bh);
return;
}
......@@ -556,7 +556,7 @@ void refill_freelist(int size)
struct buffer_head * bh, * tmp;
struct buffer_head * candidate[NR_LIST];
unsigned int best_time, winner;
int isize = BUFSIZE_INDEX(size);
int isize = BUFSIZE_INDEX(size);
int buffers[NR_LIST];
int i;
int needed;
......@@ -748,7 +748,7 @@ void refill_freelist(int size)
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
struct buffer_head * bh;
int isize = BUFSIZE_INDEX(size);
int isize = BUFSIZE_INDEX(size);
/* Update this for the buffer size lav. */
buffer_usage[isize]++;
......@@ -789,7 +789,7 @@ struct buffer_head * getblk(kdev_t dev, int block, int size)
void set_writetime(struct buffer_head * buf, int flag)
{
int newtime;
int newtime;
if (buffer_dirty(buf)) {
/* Move buffer to dirty list if jiffies is clear */
......@@ -1106,20 +1106,19 @@ static inline void after_unlock_page (struct page * page)
swap_after_unlock_page(page->swap_unlock_entry);
}
/* Free all temporary buffers belonging to a page. */
/*
* Free all temporary buffers belonging to a page.
* This needs to be called with interrupts disabled.
*/
static inline void free_async_buffers (struct buffer_head * bh)
{
struct buffer_head * tmp;
unsigned long flags;
tmp = bh;
save_flags(flags);
cli();
do {
if (!test_bit(BH_FreeOnIO, &tmp->b_state)) {
printk ("Whoops: unlock_buffer: "
"async IO mismatch on page.\n");
restore_flags(flags);
return;
}
tmp->b_next_free = reuse_list;
......@@ -1127,7 +1126,6 @@ static inline void free_async_buffers (struct buffer_head * bh)
clear_bit(BH_FreeOnIO, &tmp->b_state);
tmp = tmp->b_this_page;
} while (tmp != bh);
restore_flags(flags);
}
/*
......@@ -1171,11 +1169,12 @@ int brw_page(int rw, unsigned long address, kdev_t dev, int b[], int size, int b
next->b_flushtime = 0;
set_bit(BH_Uptodate, &next->b_state);
/* When we use bmap, we define block zero to represent
a hole. ll_rw_page, however, may legitimately
access block zero, and we need to distinguish the
two cases.
*/
/*
* When we use bmap, we define block zero to represent
* a hole. ll_rw_page, however, may legitimately
* access block zero, and we need to distinguish the
* two cases.
*/
if (bmap && !block) {
memset(next->b_data, 0, size);
next->b_count--;
......@@ -1211,10 +1210,14 @@ int brw_page(int rw, unsigned long address, kdev_t dev, int b[], int size, int b
/* The rest of the work is done in mark_buffer_uptodate()
* and unlock_buffer(). */
} else {
unsigned long flags;
clear_bit(PG_locked, &page->flags);
set_bit(PG_uptodate, &page->flags);
wake_up(&page->wait);
save_flags(flags);
cli();
free_async_buffers(bh);
restore_flags(flags);
after_unlock_page(page);
}
++current->maj_flt;
......@@ -1247,44 +1250,67 @@ void mark_buffer_uptodate(struct buffer_head * bh, int on)
*/
void unlock_buffer(struct buffer_head * bh)
{
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
if (!test_bit(BH_FreeOnIO, &bh->b_state)) {
/* This is a normal buffer. */
clear_bit(BH_Lock, &bh->b_state);
wake_up(&bh->b_wait);
clear_bit(BH_Lock, &bh->b_state);
wake_up(&bh->b_wait);
if (!test_bit(BH_FreeOnIO, &bh->b_state))
return;
}
/* This is a temporary buffer used for page I/O. */
page = mem_map + MAP_NR(bh->b_data);
if (!PageLocked(page)) {
printk ("Whoops: unlock_buffer: "
"async io complete on unlocked page\n");
return;
}
if (bh->b_count != 1) {
printk ("Whoops: unlock_buffer: b_count != 1 on async io.\n");
return;
}
/* Async buffer_heads are here only as labels for IO, and get
thrown away once the IO for this page is complete. IO is
deemed complete once all buffers have been unlocked. */
if (!PageLocked(page))
goto not_locked;
if (bh->b_count != 1)
goto bad_count;
if (!test_bit(BH_Uptodate, &bh->b_state))
set_bit(PG_error, &page->flags);
clear_bit(BH_Lock, &bh->b_state);
wake_up(&bh->b_wait);
for (tmp = bh; tmp=tmp->b_this_page, tmp!=bh; ) {
if (test_bit(BH_Lock, &tmp->b_state))
return;
}
/*
* Be _very_ careful from here on. Bad things can happen if
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*
* Async buffer_heads are here only as labels for IO, and get
* thrown away once the IO for this page is complete. IO is
* deemed complete once all buffers have been visited
* (b_count==0) and are now unlocked. We must make sure that
* only the _last_ buffer that decrements its count is the one
* that free's the page..
*/
save_flags(flags);
cli();
bh->b_count--;
tmp = bh;
do {
if (tmp->b_count)
goto still_busy;
tmp = tmp->b_this_page;
} while (tmp != bh);
/* OK, the async IO on this page is complete. */
if (!clear_bit(PG_locked, &page->flags))
return;
wake_up(&page->wait);
free_async_buffers(bh);
restore_flags(flags);
clear_bit(PG_locked, &page->flags);
wake_up(&page->wait);
after_unlock_page(page);
wake_up(&buffer_wait);
return;
still_busy:
restore_flags(flags);
return;
not_locked:
printk ("Whoops: unlock_buffer: async io complete on unlocked page\n");
return;
bad_count:
printk ("Whoops: unlock_buffer: b_count != 1 on async io.\n");
return;
}
/*
......
......@@ -35,6 +35,12 @@
Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually
be used for that range (via virt_to_bus()).
Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb,
to keep virt_to_bus() from returning an address in the first window, for
a data area that goes beyond the 64Mb first DMA window. Sigh...
The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but
we can't just use that here, because of header file looping... :-(
Window 1 will be used for all DMA from the ISA bus; yes, that does
limit what memory an ISA floppy or soundcard or Ethernet can touch, but
it's also a known limitation on other platforms as well. We use the
......@@ -60,10 +66,11 @@
however, that an XL kernel will run on an AVANTI without problems.
*/
#define APECS_XL_DMA_WIN1_BASE (64*1024*1024)
#define APECS_XL_DMA_WIN1_SIZE (64*1024*1024)
#define APECS_XL_DMA_WIN2_BASE (512*1024*1024)
#define APECS_XL_DMA_WIN2_SIZE (512*1024*1024)
#define APECS_XL_DMA_WIN1_BASE (64*1024*1024)
#define APECS_XL_DMA_WIN1_SIZE (64*1024*1024)
#define APECS_XL_DMA_WIN1_SIZE_PARANOID (48*1024*1024)
#define APECS_XL_DMA_WIN2_BASE (512*1024*1024)
#define APECS_XL_DMA_WIN2_SIZE (512*1024*1024)
#else /* CONFIG_ALPHA_XL */
......@@ -214,11 +221,17 @@
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
/* NOTE: we fudge the window 1 maximum as 48Mb instead of 64Mb, to prevent
virt_to_bus() from returning an address in the first window, for a
data area that goes beyond the 64Mb first DMA window. Sigh...
This MUST match with <asm/dma.h> MAX_DMA_ADDRESS for consistency, but
we can't just use that here, because of header file looping... :-(
*/
extern inline unsigned long virt_to_bus(void * address)
{
unsigned long paddr = virt_to_phys(address);
#ifdef CONFIG_ALPHA_XL
if (paddr < APECS_XL_DMA_WIN1_SIZE)
if (paddr < APECS_XL_DMA_WIN1_SIZE_PARANOID)
return paddr + APECS_XL_DMA_WIN1_BASE;
else
return paddr + APECS_XL_DMA_WIN2_BASE; /* win 2 xlates to 0 also */
......
#ifndef __ALPHA_ALCOR__H__
#define __ALPHA_ALCOR__H__
#ifndef __ALPHA_CIA__H__
#define __ALPHA_CIA__H__
#include <linux/types.h>
/*
* ALCOR is the internal name for the 2117x chipset which provides
* CIA is the internal name for the 2117x chipset which provides
* memory controller and PCI access for the 21164 chip based systems.
*
* This file is based on:
......@@ -35,7 +35,7 @@
**------------------------------------------------------------------------*/
/* ALCOR ADDRESS BIT DEFINITIONS
/* CIA ADDRESS BIT DEFINITIONS
*
* 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
......@@ -76,100 +76,114 @@
#define MEM_SP1_MASK 0x1fffffff /* Mem sparse space 1 mask is 29 bits */
#define ALCOR_DMA_WIN_BASE (1024UL*1024UL*1024UL)
#define ALCOR_DMA_WIN_SIZE (1024*1024*1024)
#define CIA_DMA_WIN_BASE (1024UL*1024UL*1024UL)
#define CIA_DMA_WIN_SIZE (1024*1024*1024)
/*
* 21171-CA Control and Status Registers (p4-1)
*/
#define ALCOR_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL)
#define ALCOR_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL)
#define ALCOR_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL)
#define ALCOR_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL)
#define ALCOR_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL)
#define ALCOR_IOC_CFG (IDENT_ADDR + 0x8740000480UL)
#define ALCOR_IOC_CACK_EN (IDENT_ADDR + 0x8740000600UL)
#define CIA_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL)
#define CIA_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL)
#define CIA_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL)
#define CIA_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL)
#define CIA_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL)
#define CIA_IOC_CFG (IDENT_ADDR + 0x8740000480UL)
#define CIA_IOC_CACK_EN (IDENT_ADDR + 0x8740000600UL)
/*
* 21171-CA Diagnostic Registers (p4-2)
*/
#define ALCOR_IOC_CIA_DIAG (IDENT_ADDR + 0x8740002000UL)
#define ALCOR_IOC_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL)
#define CIA_IOC_CIA_DIAG (IDENT_ADDR + 0x8740002000UL)
#define CIA_IOC_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL)
/*
* 21171-CA Performance Monitor registers (p4-3)
*/
#define ALCOR_IOC_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL)
#define ALCOR_IOC_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL)
#define CIA_IOC_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL)
#define CIA_IOC_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL)
/*
* 21171-CA Error registers (p4-3)
*/
#define ALCOR_IOC_CPU_ERR0 (IDENT_ADDR + 0x8740008000UL)
#define ALCOR_IOC_CPU_ERR1 (IDENT_ADDR + 0x8740008040UL)
#define ALCOR_IOC_CIA_ERR (IDENT_ADDR + 0x8740008200UL)
#define ALCOR_IOC_CIA_STAT (IDENT_ADDR + 0x8740008240UL)
#define ALCOR_IOC_ERR_MASK (IDENT_ADDR + 0x8740008280UL)
#define ALCOR_IOC_CIA_SYN (IDENT_ADDR + 0x8740008300UL)
#define ALCOR_IOC_MEM_ERR0 (IDENT_ADDR + 0x8740008400UL)
#define ALCOR_IOC_MEM_ERR1 (IDENT_ADDR + 0x8740008440UL)
#define ALCOR_IOC_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL)
#define ALCOR_IOC_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL)
#define ALCOR_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL)
#define CIA_IOC_CPU_ERR0 (IDENT_ADDR + 0x8740008000UL)
#define CIA_IOC_CPU_ERR1 (IDENT_ADDR + 0x8740008040UL)
#define CIA_IOC_CIA_ERR (IDENT_ADDR + 0x8740008200UL)
#define CIA_IOC_CIA_STAT (IDENT_ADDR + 0x8740008240UL)
#define CIA_IOC_ERR_MASK (IDENT_ADDR + 0x8740008280UL)
#define CIA_IOC_CIA_SYN (IDENT_ADDR + 0x8740008300UL)
#define CIA_IOC_MEM_ERR0 (IDENT_ADDR + 0x8740008400UL)
#define CIA_IOC_MEM_ERR1 (IDENT_ADDR + 0x8740008440UL)
#define CIA_IOC_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL)
#define CIA_IOC_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL)
#define CIA_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL)
/*
* 2117A-CA PCI Address Translation Registers. I've only defined
* the first window fully as that's the only one that we're currently using.
* The other window bases are needed to disable the windows.
*/
#define ALCOR_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL)
#define ALCOR_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL)
#define ALCOR_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL)
#define ALCOR_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL)
#define CIA_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL)
#define CIA_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL)
#define CIA_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL)
#define CIA_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL)
#define ALCOR_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL)
#define ALCOR_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL)
#define ALCOR_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL)
#define CIA_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL)
#define CIA_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL)
#define CIA_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL)
/*
* 21171-CA System configuration registers (p4-3)
*/
#define ALCOR_IOC_MCR (IDENT_ADDR + 0x8750000000UL)
#define ALCOR_IOC_MBA0 (IDENT_ADDR + 0x8750000600UL)
#define ALCOR_IOC_MBA2 (IDENT_ADDR + 0x8750000680UL)
#define ALCOR_IOC_MBA4 (IDENT_ADDR + 0x8750000700UL)
#define ALCOR_IOC_MBA6 (IDENT_ADDR + 0x8750000780UL)
#define ALCOR_IOC_MBA8 (IDENT_ADDR + 0x8750000800UL)
#define ALCOR_IOC_MBAA (IDENT_ADDR + 0x8750000880UL)
#define ALCOR_IOC_MBAC (IDENT_ADDR + 0x8750000900UL)
#define ALCOR_IOC_MBAE (IDENT_ADDR + 0x8750000980UL)
#define ALCOR_IOC_TMG0 (IDENT_ADDR + 0x8750000B00UL)
#define ALCOR_IOC_TMG1 (IDENT_ADDR + 0x8750000B40UL)
#define ALCOR_IOC_TMG2 (IDENT_ADDR + 0x8750000B80UL)
#define CIA_IOC_MCR (IDENT_ADDR + 0x8750000000UL)
#define CIA_IOC_MBA0 (IDENT_ADDR + 0x8750000600UL)
#define CIA_IOC_MBA2 (IDENT_ADDR + 0x8750000680UL)
#define CIA_IOC_MBA4 (IDENT_ADDR + 0x8750000700UL)
#define CIA_IOC_MBA6 (IDENT_ADDR + 0x8750000780UL)
#define CIA_IOC_MBA8 (IDENT_ADDR + 0x8750000800UL)
#define CIA_IOC_MBAA (IDENT_ADDR + 0x8750000880UL)
#define CIA_IOC_MBAC (IDENT_ADDR + 0x8750000900UL)
#define CIA_IOC_MBAE (IDENT_ADDR + 0x8750000980UL)
#define CIA_IOC_TMG0 (IDENT_ADDR + 0x8750000B00UL)
#define CIA_IOC_TMG1 (IDENT_ADDR + 0x8750000B40UL)
#define CIA_IOC_TMG2 (IDENT_ADDR + 0x8750000B80UL)
/*
* Memory spaces:
*/
#define ALCOR_IACK_SC (IDENT_ADDR + 0x8720000000UL)
#define ALCOR_CONF (IDENT_ADDR + 0x8700000000UL)
#define ALCOR_IO (IDENT_ADDR + 0x8580000000UL)
#define ALCOR_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL)
#define ALCOR_DENSE_MEM (IDENT_ADDR + 0x8600000000UL)
#define CIA_IACK_SC (IDENT_ADDR + 0x8720000000UL)
#define CIA_CONF (IDENT_ADDR + 0x8700000000UL)
#define CIA_IO (IDENT_ADDR + 0x8580000000UL)
#define CIA_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL)
#define CIA_DENSE_MEM (IDENT_ADDR + 0x8600000000UL)
/*
* ALCOR's GRU ASIC registers
*/
#define GRU_INT_REQ (IDENT_ADDR + 0x8780000000UL)
#define GRU_INT_MASK (IDENT_ADDR + 0x8780000040UL)
#define GRU_INT_EDGE (IDENT_ADDR + 0x8780000080UL)
#define GRU_INT_HILO (IDENT_ADDR + 0x87800000C0UL)
#define GRU_INT_CLEAR (IDENT_ADDR + 0x8780000100UL)
#define GRU_CACHE_CNFG (IDENT_ADDR + 0x8780000200UL)
#define GRU_SCR (IDENT_ADDR + 0x8780000300UL)
#define GRU_LED (IDENT_ADDR + 0x8780000800UL)
#define GRU_RESET (IDENT_ADDR + 0x8780000900UL)
/*
* Bit definitions for I/O Controller status register 0:
*/
#define ALCOR_IOC_STAT0_CMD 0xf
#define ALCOR_IOC_STAT0_ERR (1<<4)
#define ALCOR_IOC_STAT0_LOST (1<<5)
#define ALCOR_IOC_STAT0_THIT (1<<6)
#define ALCOR_IOC_STAT0_TREF (1<<7)
#define ALCOR_IOC_STAT0_CODE_SHIFT 8
#define ALCOR_IOC_STAT0_CODE_MASK 0x7
#define ALCOR_IOC_STAT0_P_NBR_SHIFT 13
#define ALCOR_IOC_STAT0_P_NBR_MASK 0x7ffff
#define HAE_ADDRESS ALCOR_IOC_HAE_MEM
#define CIA_IOC_STAT0_CMD 0xf
#define CIA_IOC_STAT0_ERR (1<<4)
#define CIA_IOC_STAT0_LOST (1<<5)
#define CIA_IOC_STAT0_THIT (1<<6)
#define CIA_IOC_STAT0_TREF (1<<7)
#define CIA_IOC_STAT0_CODE_SHIFT 8
#define CIA_IOC_STAT0_CODE_MASK 0x7
#define CIA_IOC_STAT0_P_NBR_SHIFT 13
#define CIA_IOC_STAT0_P_NBR_MASK 0x7ffff
#define HAE_ADDRESS CIA_IOC_HAE_MEM
#ifdef __KERNEL__
......@@ -179,18 +193,18 @@
*/
extern inline unsigned long virt_to_bus(void * address)
{
return virt_to_phys(address) + ALCOR_DMA_WIN_BASE;
return virt_to_phys(address) + CIA_DMA_WIN_BASE;
}
extern inline void * bus_to_virt(unsigned long address)
{
return phys_to_virt(address - ALCOR_DMA_WIN_BASE);
return phys_to_virt(address - CIA_DMA_WIN_BASE);
}
/*
* I/O functions:
*
* Alcor (the 2117x PCI/memory support chipset for the EV5 (21164)
* CIA (the 2117x PCI/memory support chipset for the EV5 (21164)
* series of processors uses a sparse address mapping scheme to
* get at PCI memory and I/O.
*/
......@@ -199,7 +213,7 @@ extern inline void * bus_to_virt(unsigned long address)
extern inline unsigned int __inb(unsigned long addr)
{
long result = *(vuip) ((addr << 5) + ALCOR_IO + 0x00);
long result = *(vuip) ((addr << 5) + CIA_IO + 0x00);
result >>= (addr & 3) * 8;
return 0xffUL & result;
}
......@@ -209,13 +223,13 @@ extern inline void __outb(unsigned char b, unsigned long addr)
unsigned int w;
asm ("insbl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
*(vuip) ((addr << 5) + ALCOR_IO + 0x00) = w;
*(vuip) ((addr << 5) + CIA_IO + 0x00) = w;
mb();
}
extern inline unsigned int __inw(unsigned long addr)
{
long result = *(vuip) ((addr << 5) + ALCOR_IO + 0x08);
long result = *(vuip) ((addr << 5) + CIA_IO + 0x08);
result >>= (addr & 3) * 8;
return 0xffffUL & result;
}
......@@ -225,18 +239,18 @@ extern inline void __outw(unsigned short b, unsigned long addr)
unsigned int w;
asm ("inswl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
*(vuip) ((addr << 5) + ALCOR_IO + 0x08) = w;
*(vuip) ((addr << 5) + CIA_IO + 0x08) = w;
mb();
}
extern inline unsigned int __inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + ALCOR_IO + 0x18);
return *(vuip) ((addr << 5) + CIA_IO + 0x18);
}
extern inline void __outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + ALCOR_IO + 0x18) = b;
*(vuip) ((addr << 5) + CIA_IO + 0x18) = b;
mb();
}
......@@ -283,7 +297,7 @@ extern inline unsigned long __readb(unsigned long addr)
if (msb != hae.cache) {
set_hae(msb);
}
result = *(vuip) ((addr << 5) + ALCOR_SPARSE_MEM + 0x00) ;
result = *(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x00) ;
result >>= shift;
return 0xffUL & result;
}
......@@ -298,14 +312,14 @@ extern inline unsigned long __readw(unsigned long addr)
if (msb != hae.cache) {
set_hae(msb);
}
result = *(vuip) ((addr << 5) + ALCOR_SPARSE_MEM + 0x08);
result = *(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x08);
result >>= shift;
return 0xffffUL & result;
}
extern inline unsigned long __readl(unsigned long addr)
{
return *(vuip) (addr + ALCOR_DENSE_MEM);
return *(vuip) (addr + CIA_DENSE_MEM);
}
extern inline void __writeb(unsigned char b, unsigned long addr)
......@@ -317,7 +331,7 @@ extern inline void __writeb(unsigned char b, unsigned long addr)
if (msb != hae.cache) {
set_hae(msb);
}
*(vuip) ((addr << 5) + ALCOR_SPARSE_MEM + 0x00) = b * 0x01010101;
*(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x00) = b * 0x01010101;
}
extern inline void __writew(unsigned short b, unsigned long addr)
......@@ -329,12 +343,12 @@ extern inline void __writew(unsigned short b, unsigned long addr)
if (msb != hae.cache) {
set_hae(msb);
}
*(vuip) ((addr << 5) + ALCOR_SPARSE_MEM + 0x08) = b * 0x00010001;
*(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x08) = b * 0x00010001;
}
extern inline void __writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + ALCOR_DENSE_MEM) = b;
*(vuip) (addr + CIA_DENSE_MEM) = b;
}
#define inb(port) \
......@@ -348,15 +362,15 @@ extern inline void __writel(unsigned int b, unsigned long addr)
#undef vuip
extern unsigned long alcor_init (unsigned long mem_start,
extern unsigned long cia_init (unsigned long mem_start,
unsigned long mem_end);
#endif /* __KERNEL__ */
/*
* Data structure for handling ALCOR machine checks:
* Data structure for handling CIA machine checks:
*/
struct el_ALCOR_sysdata_mcheck {
struct el_CIA_sysdata_mcheck {
u_long coma_gcr;
u_long coma_edsr;
u_long coma_ter;
......@@ -404,4 +418,4 @@ struct el_ALCOR_sysdata_mcheck {
#define RTC_ADDR(x) (0x80 | (x))
#define RTC_ALWAYS_BCD 0
#endif /* __ALPHA_ALCOR__H__ */
#endif /* __ALPHA_CIA__H__ */
......@@ -78,8 +78,15 @@
#ifdef CONFIG_ALPHA_XL
/* The maximum address that we can perform a DMA transfer to on Alpha XL,
due to a hardware SIO (PCI<->ISA bus bridge) chip limitation, is 64MB.
see <asm/apecs.h> for more info */
#define MAX_DMA_ADDRESS (0xfffffc0004000000UL)
See <asm/apecs.h> for more info.
*/
/* NOTE: we must define the maximum as something less than 64Mb, to prevent
virt_to_bus() from returning an address in the first window, for a
data area that goes beyond the 64Mb first DMA window. Sigh...
We MUST coordinate the maximum with <asm/apecs.h> for consistency.
For now, this limit is set to 48Mb...
*/
#define MAX_DMA_ADDRESS (0xfffffc0003000000UL)
#else /* CONFIG_ALPHA_XL */
/* The maximum address that we can perform a DMA transfer to on normal
Alpha platforms */
......
......@@ -71,14 +71,14 @@ extern void _sethae (unsigned long addr); /* cached version */
#endif /* !__KERNEL__ */
/*
* There are different version of the Alpha PC motherboards:
* There are different chipsets to interface the Alpha CPUs to the world.
*/
#if defined(CONFIG_ALPHA_LCA)
# include <asm/lca.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_APECS)
# include <asm/apecs.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_ALCOR)
# include <asm/alcor.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/cia.h> /* get chip-specific definitions */
#else
# include <asm/jensen.h>
#endif
......
......@@ -12,8 +12,10 @@
#if defined(CONFIG_ALPHA_CABRIOLET) || defined(CONFIG_ALPHA_EB66P) || defined(CONFIG_ALPHA_EB164) || defined(CONFIG_ALPHA_PC164)
# define NR_IRQS 33
#elif defined(CONFIG_ALPHA_EB66) || defined(CONFIG_ALPHA_EB64P)
#elif defined(CONFIG_ALPHA_EB66) || defined(CONFIG_ALPHA_EB64P) || defined(CONFIG_ALPHA_MIKASA)
# define NR_IRQS 32
#elif defined(CONFIG_ALPHA_ALCOR)
# define NR_IRQS 48
#else
# define NR_IRQS 16
#endif
......
......@@ -37,11 +37,11 @@
/* Various timeout loop repetition counts. */
#define BUSY_TIMEOUT 10000000 /* for busy wait */
#define FAST_TIMEOUT 100000 /* ibid. for probing */
#define SLEEP_TIMEOUT 3000 /* for timer wait */
#define MULTI_SEEK_TIMEOUT 500 /* for timer wait */
#define READ_TIMEOUT 3000 /* for poll wait */
#define STOP_TIMEOUT 1000 /* for poll wait */
#define RESET_WAIT 1000 /* busy wait at drive reset */
#define SLEEP_TIMEOUT 6000 /* for timer wait */
#define MULTI_SEEK_TIMEOUT 1000 /* for timer wait */
#define READ_TIMEOUT 6000 /* for poll wait */
#define STOP_TIMEOUT 2000 /* for poll wait */
#define RESET_WAIT 5000 /* busy wait at drive reset */
/* # of buffers for block size conversion. 6 is optimal for my setup (P75),
giving 280 kb/s, with 0.4% CPU usage. Experiment to find your optimal
......
......@@ -496,6 +496,18 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *skb,
*/
extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
return -ENOMEM;
atomic_add(skb->truesize, &sk->rmem_alloc);
skb->sk=sk;
skb_queue_tail(&sk->receive_queue,skb);
if (!sk->dead)
sk->data_ready(sk,skb->len);
return 0;
}
extern __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
return -ENOMEM;
......
......@@ -69,6 +69,7 @@
#define WRITE_LONG 0x3f
#define CHANGE_DEFINITION 0x40
#define WRITE_SAME 0x41
#define READ_TOC 0x43
#define LOG_SELECT 0x4c
#define LOG_SENSE 0x4d
#define MODE_SELECT_10 0x55
......
......@@ -39,6 +39,29 @@ pgprot_t protection_map[16] = {
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
/*
* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
* internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
* into "VM_xxx".
*/
static inline unsigned long vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
unsigned long prot_bits, flag_bits;
prot_bits =
_trans(prot, PROT_READ, VM_READ) |
_trans(prot, PROT_WRITE, VM_WRITE) |
_trans(prot, PROT_EXEC, VM_EXEC);
flag_bits =
_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
return prot_bits | flag_bits;
#undef _trans
}
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long off)
{
......@@ -126,9 +149,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
vma->vm_mm = current->mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
vma->vm_flags |= current->mm->def_flags;
vma->vm_flags = vm_flags(prot,flags) | current->mm->def_flags;
if (file) {
if (file->f_mode & 1)
......
......@@ -113,7 +113,7 @@ static inline void raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
/* Charge it to the socket. */
if (sock_queue_rcv_skb(sk,skb)<0)
if (__sock_queue_rcv_skb(sk,skb)<0)
{
ip_statistics.IpInDiscards++;
skb->sk=NULL;
......
......@@ -641,7 +641,7 @@ static inline void udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
/* I assume this includes the IP options, as per RFC1122 (4.1.3.2). */
/* If not, please let me know. -- MS */
if (sock_queue_rcv_skb(sk,skb)<0) {
if (__sock_queue_rcv_skb(sk,skb)<0) {
udp_statistics.UdpInErrors++;
ip_statistics.IpInDiscards++;
ip_statistics.IpInDelivers--;
......
......@@ -660,7 +660,7 @@ send a problem report to linux-kernel@vger.rutgers.edu or post a
message to the linux.dev.kernel news group.
Please indicate the kernel version you are trying to configure and
which menu you were trying to enter when this error occured.
which menu you were trying to enter when this error occurred.
EOM
cleanup
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment