Commit 5be1d85c authored by Linus Torvalds

Merge refs/heads/upstream from master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

parents 69be8f18 13593265
......@@ -269,6 +269,8 @@ static struct pci_device_id ahci_pci_tbl[] = {
board_ahci }, /* ESB2 */
{ PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ESB2 */
{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ICH7-M DH */
{ } /* terminate list */
};
......@@ -584,12 +586,16 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
static void ahci_eng_timeout(struct ata_port *ap)
{
- void *mmio = ap->host_set->mmio_base;
+ struct ata_host_set *host_set = ap->host_set;
+ void *mmio = host_set->mmio_base;
void *port_mmio = ahci_port_base(mmio, ap->port_no);
struct ata_queued_cmd *qc;
unsigned long flags;
DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT));
qc = ata_qc_from_tag(ap, ap->active_tag);
......@@ -607,6 +613,7 @@ static void ahci_eng_timeout(struct ata_port *ap)
ata_qc_complete(qc, ATA_ERR);
}
spin_unlock_irqrestore(&host_set->lock, flags);
}
static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
......@@ -696,9 +703,6 @@ static int ahci_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
void *port_mmio = (void *) ap->ioaddr.cmd_addr;
- writel(1, port_mmio + PORT_SCR_ACT);
- readl(port_mmio + PORT_SCR_ACT); /* flush */
writel(1, port_mmio + PORT_CMD_ISSUE);
readl(port_mmio + PORT_CMD_ISSUE); /* flush */
......
......@@ -2376,6 +2376,27 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
return 0;
}
/**
* ata_poll_qc_complete - turn irq back on and finish qc
* @qc: Command to complete
* @drv_stat: ATA status register content
*
* LOCKING:
* None. (grabs host lock)
*/
void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
struct ata_port *ap = qc->ap;
unsigned long flags;
spin_lock_irqsave(&ap->host_set->lock, flags);
ap->flags &= ~ATA_FLAG_NOINTR;
ata_irq_on(ap);
ata_qc_complete(qc, drv_stat);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
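This helper replaces the open-coded irq-on/complete pair in every polling path touched by this merge (ata_pio_complete, ata_pio_block, ata_pio_error, and the atapi_packet_task error path). Each call site converts roughly as follows; an illustrative sketch of the pattern, not an additional hunk:

/* before: irq re-enabled and qc completed without taking host_set->lock */
ata_irq_on(ap);
ata_qc_complete(qc, drv_stat);

/* after: one helper that also clears ATA_FLAG_NOINTR under host_set->lock */
ata_poll_qc_complete(qc, drv_stat);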
/**
* ata_pio_poll -
* @ap:
......@@ -2438,11 +2459,10 @@ static void ata_pio_complete (struct ata_port *ap)
u8 drv_stat;
/*
- * This is purely hueristic. This is a fast path.
- * Sometimes when we enter, BSY will be cleared in
- * a chk-status or two. If not, the drive is probably seeking
- * or something. Snooze for a couple msecs, then
- * chk-status again. If still busy, fall back to
+ * This is purely heuristic. This is a fast path. Sometimes when
+ * we enter, BSY will be cleared in a chk-status or two. If not,
+ * the drive is probably seeking or something. Snooze for a couple
+ * msecs, then chk-status again. If still busy, fall back to
* PIO_ST_POLL state.
*/
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
......@@ -2467,9 +2487,7 @@ static void ata_pio_complete (struct ata_port *ap)
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
- ata_qc_complete(qc, drv_stat);
+ ata_poll_qc_complete(qc, drv_stat);
}
......@@ -2494,6 +2512,20 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
#endif /* __BIG_ENDIAN */
}
/**
* ata_mmio_data_xfer - Transfer data by MMIO
* @ap: port to read/write
* @buf: data buffer
* @buflen: buffer length
* @write_data: read/write
*
* Transfer data from/to the device data register by MMIO.
*
* LOCKING:
* Inherited from caller.
*
*/
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int write_data)
{
......@@ -2502,6 +2534,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
u16 *buf16 = (u16 *) buf;
void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
/* Transfer multiple of 2 bytes */
if (write_data) {
for (i = 0; i < words; i++)
writew(le16_to_cpu(buf16[i]), mmio);
......@@ -2509,19 +2542,76 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
for (i = 0; i < words; i++)
buf16[i] = cpu_to_le16(readw(mmio));
}
/* Transfer trailing 1 byte, if any. */
if (unlikely(buflen & 0x01)) {
u16 align_buf[1] = { 0 };
unsigned char *trailing_buf = buf + buflen - 1;
if (write_data) {
memcpy(align_buf, trailing_buf, 1);
writew(le16_to_cpu(align_buf[0]), mmio);
} else {
align_buf[0] = cpu_to_le16(readw(mmio));
memcpy(trailing_buf, align_buf, 1);
}
}
}
/**
* ata_pio_data_xfer - Transfer data by PIO
* @ap: port to read/write
* @buf: data buffer
* @buflen: buffer length
* @write_data: read/write
*
* Transfer data from/to the device data register by PIO.
*
* LOCKING:
* Inherited from caller.
*
*/
static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int write_data)
{
- unsigned int dwords = buflen >> 1;
+ unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
if (write_data)
- outsw(ap->ioaddr.data_addr, buf, dwords);
+ outsw(ap->ioaddr.data_addr, buf, words);
else
- insw(ap->ioaddr.data_addr, buf, dwords);
+ insw(ap->ioaddr.data_addr, buf, words);
/* Transfer trailing 1 byte, if any. */
if (unlikely(buflen & 0x01)) {
u16 align_buf[1] = { 0 };
unsigned char *trailing_buf = buf + buflen - 1;
if (write_data) {
memcpy(align_buf, trailing_buf, 1);
outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
} else {
align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
memcpy(trailing_buf, align_buf, 1);
}
}
}
/**
* ata_data_xfer - Transfer data from/to the data register.
* @ap: port to read/write
* @buf: data buffer
* @buflen: buffer length
* @do_write: read/write
*
* Transfer data from/to the device data register.
*
* LOCKING:
* Inherited from caller.
*
*/
static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int do_write)
{
......@@ -2531,6 +2621,16 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
ata_pio_data_xfer(ap, buf, buflen, do_write);
}
/**
* ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
* @qc: Command in progress
*
* Transfer ATA_SECT_SIZE of data from/to the ATA device.
*
* LOCKING:
* Inherited from caller.
*/
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
......@@ -2569,6 +2669,18 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
kunmap(page);
}
/**
* __atapi_pio_bytes - Transfer data from/to the ATAPI device.
* @qc: Command in progress
* @bytes: number of bytes
*
* Transfer data from/to the ATAPI device.
*
* LOCKING:
* Inherited from caller.
*
*/
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
......@@ -2578,10 +2690,33 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
unsigned char *buf;
unsigned int offset, count;
- if (qc->curbytes == qc->nbytes - bytes)
+ if (qc->curbytes + bytes >= qc->nbytes)
ap->pio_task_state = PIO_ST_LAST;
next_sg:
if (unlikely(qc->cursg >= qc->n_elem)) {
/*
* The end of qc->sg is reached and the device expects
* more data to transfer. In order not to overrun qc->sg
* and fulfill length specified in the byte count register,
* - for read case, discard trailing data from the device
* - for write case, padding zero data to the device
*/
u16 pad_buf[1] = { 0 };
unsigned int words = bytes >> 1;
unsigned int i;
if (words) /* warning if bytes > 1 */
printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
ap->id, bytes);
for (i = 0; i < words; i++)
ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
ap->pio_task_state = PIO_ST_LAST;
return;
}
sg = &qc->sg[qc->cursg];
page = sg->page;
......@@ -2615,11 +2750,21 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
kunmap(page);
- if (bytes) {
+ if (bytes)
goto next_sg;
- }
}
/**
* atapi_pio_bytes - Transfer data from/to the ATAPI device.
* @qc: Command in progress
*
* Transfer data from/to the ATAPI device.
*
* LOCKING:
* Inherited from caller.
*
*/
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
......@@ -2692,9 +2837,7 @@ static void ata_pio_block(struct ata_port *ap)
if ((status & ATA_DRQ) == 0) {
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
- ata_qc_complete(qc, status);
+ ata_poll_qc_complete(qc, status);
return;
}
......@@ -2724,9 +2867,7 @@ static void ata_pio_error(struct ata_port *ap)
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
- ata_qc_complete(qc, drv_stat | ATA_ERR);
+ ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
}
static void ata_pio_task(void *_data)
......@@ -2832,8 +2973,10 @@ static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_host_set *host_set = ap->host_set;
struct ata_device *dev = qc->dev;
u8 host_stat = 0, drv_stat;
unsigned long flags;
DPRINTK("ENTER\n");
......@@ -2844,7 +2987,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
/* finish completing original command */
spin_lock_irqsave(&host_set->lock, flags);
__ata_qc_complete(qc);
spin_unlock_irqrestore(&host_set->lock, flags);
atapi_request_sense(ap, dev, cmd);
......@@ -2855,6 +3000,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
}
}
spin_lock_irqsave(&host_set->lock, flags);
/* hack alert! We cannot use the supplied completion
* function from inside the ->eh_strategy_handler() thread.
* libata is the only user of ->eh_strategy_handler() in
......@@ -2870,7 +3017,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
host_stat = ap->ops->bmdma_status(ap);
/* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(ap);
+ ap->ops->bmdma_stop(qc);
/* fall through */
......@@ -2888,6 +3035,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
ata_qc_complete(qc, drv_stat);
break;
}
spin_unlock_irqrestore(&host_set->lock, flags);
out:
DPRINTK("EXIT\n");
}
......@@ -3061,9 +3211,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
ata_sg_clean(qc);
/* atapi: mark qc as inactive to prevent the interrupt handler
* from completing the command twice later, before the error handler
* is called. (when rc != 0 and atapi request sense is needed)
*/
+ qc->flags &= ~ATA_QCFLAG_ACTIVE;
/* call completion callback */
rc = qc->complete_fn(qc, drv_stat);
- qc->flags &= ~ATA_QCFLAG_ACTIVE;
/* if callback indicates not to complete command (non-zero),
* return immediately
......@@ -3193,11 +3348,13 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
break;
case ATA_PROT_ATAPI_NODATA:
ap->flags |= ATA_FLAG_NOINTR;
ata_tf_to_host_nolock(ap, &qc->tf);
queue_work(ata_wq, &ap->packet_task);
break;
case ATA_PROT_ATAPI_DMA:
ap->flags |= ATA_FLAG_NOINTR;
ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_setup(qc); /* set up bmdma */
queue_work(ata_wq, &ap->packet_task);
......@@ -3242,7 +3399,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
}
/**
- * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
......@@ -3413,7 +3570,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
/**
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @ap: Port associated with this ATA transaction.
+ * @qc: Command we are ending DMA for
*
* Clears the ATA_DMA_START flag in the dma control register
*
......@@ -3423,8 +3580,9 @@ u8 ata_bmdma_status(struct ata_port *ap)
* spin_lock_irqsave(host_set lock)
*/
- void ata_bmdma_stop(struct ata_port *ap)
+ void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
......@@ -3476,7 +3634,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
goto idle_irq;
/* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(ap);
+ ap->ops->bmdma_stop(qc);
/* fall through */
......@@ -3551,7 +3709,8 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
struct ata_port *ap;
ap = host_set->ports[i];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
......@@ -3603,19 +3762,27 @@ static void atapi_packet_task(void *_data)
/* send SCSI cdb */
DPRINTK("send cdb\n");
assert(ap->cdb_len >= 12);
- ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
- /* if we are DMA'ing, irq handler takes over from here */
+ if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
+ qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
+ unsigned long flags;
+ /* Once we're done issuing command and kicking bmdma,
+ * irq handler takes over. To not lose irq, we need
+ * to clear NOINTR flag before sending cdb, but
+ * interrupt handler shouldn't be invoked before we're
+ * finished. Hence, the following locking.
+ */
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->flags &= ~ATA_FLAG_NOINTR;
+ ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
ap->ops->bmdma_start(qc); /* initiate bmdma */
- /* non-data commands are also handled via irq */
- else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
- /* do nothing */
- }
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+ } else {
+ ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
/* PIO commands are handled by polling */
- else {
ap->pio_task_state = PIO_ST;
queue_work(ata_wq, &ap->pio_task);
}
......@@ -3623,7 +3790,7 @@ static void atapi_packet_task(void *_data)
return;
err_out:
- ata_qc_complete(qc, ATA_ERR);
+ ata_poll_qc_complete(qc, ATA_ERR);
}
......
......@@ -391,6 +391,60 @@ int ata_scsi_error(struct Scsi_Host *host)
return 0;
}
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
* @scsicmd: SCSI command to translate
*
* Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
* (to start). Perhaps these commands should be preceded by
* CHECK POWER MODE to see what power mode the device is already in.
* [See SAT revision 5 at www.t10.org]
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
u8 *scsicmd)
{
struct ata_taskfile *tf = &qc->tf;
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf->protocol = ATA_PROT_NODATA;
if (scsicmd[1] & 0x1) {
; /* ignore IMMED bit, violates sat-r05 */
}
if (scsicmd[4] & 0x2)
return 1; /* LOEJ bit set not supported */
if (((scsicmd[4] >> 4) & 0xf) != 0)
return 1; /* power conditions not supported */
if (scsicmd[4] & 0x1) {
tf->nsect = 1; /* 1 sector, lba=0 */
tf->lbah = 0x0;
tf->lbam = 0x0;
tf->lbal = 0x0;
tf->device |= ATA_LBA;
tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
} else {
tf->nsect = 0; /* time period value (0 implies now) */
tf->command = ATA_CMD_STANDBY;
/* Consider: ATA STANDBY IMMEDIATE command */
}
/*
* Standby and Idle condition timers could be implemented but that
* would require libata to implement the Power condition mode page
* and allow the user to change it. Changing mode pages requires
* MODE SELECT to be implemented.
*/
return 0;
}
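For orientation, here is how typical START STOP UNIT CDBs map onto the translation above; a worked example derived from the byte tests in the function, not part of the patch:

/* START STOP UNIT, Start=1 (1b 00 00 00 01 00): scsicmd[4] & 0x1 set
 *   -> ATA_CMD_VERIFY of one sector at LBA 0, i.e. spin the drive up
 * START STOP UNIT, Start=0 (1b 00 00 00 00 00):
 *   -> ATA_CMD_STANDBY with nsect = 0 (enter standby now, no timer)
 * LOEJ set (scsicmd[4] & 0x2) or a non-zero power condition (scsicmd[4] >> 4)
 *   -> the function returns 1 and the command is not translated
 */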
/**
* ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
* @qc: Storage for translated ATA taskfile
......@@ -576,11 +630,19 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
tf->lbah = scsicmd[3];
VPRINTK("ten-byte command\n");
if (qc->nsect == 0) /* we don't support length==0 cmds */
return 1;
return 0;
}
if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) {
qc->nsect = tf->nsect = scsicmd[4];
if (!qc->nsect) {
qc->nsect = 256;
if (lba48)
tf->hob_nsect = 1;
}
tf->lbal = scsicmd[3];
tf->lbam = scsicmd[2];
tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
......@@ -620,6 +682,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
tf->lbah = scsicmd[7];
VPRINTK("sixteen-byte command\n");
if (qc->nsect == 0) /* we don't support length==0 cmds */
return 1;
return 0;
}
......@@ -1435,6 +1499,8 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case VERIFY:
case VERIFY_16:
return ata_scsi_verify_xlat;
case START_STOP:
return ata_scsi_start_stop_xlat;
}
return NULL;
......
......@@ -20,6 +20,12 @@
* If you do not delete the provisions above, a recipient may use your
* version of this file under either the OSL or the GPL.
*
* 0.08
* - Added support for MCP51 and MCP55.
*
* 0.07
* - Added support for RAID class code.
*
* 0.06
* - Added generic SATA support by using a pci_device_id that filters on
* the IDE storage class code.
......@@ -48,7 +54,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
#define DRV_VERSION "0.6"
#define DRV_VERSION "0.8"
#define NV_PORTS 2
#define NV_PIO_MASK 0x1f
......@@ -116,7 +122,9 @@ enum nv_host_type
GENERIC,
NFORCE2,
NFORCE3,
- CK804
+ CK804,
+ MCP51,
+ MCP55
};
static struct pci_device_id nv_pci_tbl[] = {
......@@ -134,9 +142,18 @@ static struct pci_device_id nv_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
{ 0, } /* terminate list */
};
......@@ -274,7 +291,8 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
struct ata_port *ap;
ap = host_set->ports[i];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
......
......@@ -181,6 +181,10 @@ static struct pci_device_id pdc_ata_pci_tbl[] = {
board_20319 },
{ PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
{ PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
{ PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
{ PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
......@@ -321,11 +325,15 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
static void pdc_eng_timeout(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
u8 drv_stat;
struct ata_queued_cmd *qc;
unsigned long flags;
DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!qc) {
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
......@@ -359,6 +367,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
}
out:
spin_unlock_irqrestore(&host_set->lock, flags);
DPRINTK("EXIT\n");
}
......@@ -441,7 +450,8 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
VPRINTK("port %u\n", i);
ap = host_set->ports[i];
tmp = mask & (1 << (i + 1));
- if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (tmp && ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
......
......@@ -117,7 +117,7 @@ static void qs_phy_reset(struct ata_port *ap);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
- static void qs_bmdma_stop(struct ata_port *ap);
+ static void qs_bmdma_stop(struct ata_queued_cmd *qc);
static u8 qs_bmdma_status(struct ata_port *ap);
static void qs_irq_clear(struct ata_port *ap);
static void qs_eng_timeout(struct ata_port *ap);
......@@ -198,7 +198,7 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
return 1; /* ATAPI DMA not supported */
}
- static void qs_bmdma_stop(struct ata_port *ap)
+ static void qs_bmdma_stop(struct ata_queued_cmd *qc)
{
/* nothing */
}
......@@ -386,7 +386,8 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
handled = 1;
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap && !(ap->flags &
+ (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_pkt)
......@@ -417,7 +418,8 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_mmio)
......
......@@ -24,6 +24,11 @@
* If you do not delete the provisions above, a recipient may use your
* version of this file under either the OSL or the GPL.
*
* Documentation for SiI 3112:
* http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
*
* Other errata and documentation available under NDA.
*
*/
#include <linux/kernel.h>
......@@ -41,8 +46,11 @@
#define DRV_VERSION "0.9"
enum {
SIL_FLAG_MOD15WRITE = (1 << 30),
sil_3112 = 0,
- sil_3114 = 1,
+ sil_3112_m15w = 1,
+ sil_3114 = 2,
SIL_FIFO_R0 = 0x40,
SIL_FIFO_W0 = 0x41,
......@@ -76,13 +84,13 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void sil_post_set_mode (struct ata_port *ap);
static struct pci_device_id sil_pci_tbl[] = {
- { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
+ { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
+ { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
- { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
+ { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
+ { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
+ { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
{ } /* terminate list */
};
......@@ -174,6 +182,16 @@ static struct ata_port_info sil_port_info[] = {
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
.port_ops = &sil_ops,
}, /* sil_3112_m15w - keep it sync'd w/ sil_3112 */
{
.sht = &sil_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO |
SIL_FLAG_MOD15WRITE,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
.port_ops = &sil_ops,
}, /* sil_3114 */
{
.sht = &sil_sht,
......@@ -331,7 +349,7 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
}
/* limit requests to 15 sectors */
- if (quirks & SIL_QUIRK_MOD15WRITE) {
+ if ((ap->flags & SIL_FLAG_MOD15WRITE) && (quirks & SIL_QUIRK_MOD15WRITE)) {
printk(KERN_INFO "ata%u(%u): applying Seagate errata fix\n",
ap->id, dev->devno);
ap->host->max_sectors = 15;
......
......@@ -825,7 +825,8 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
ap = host_set->ports[port_no];
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
- if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (tmp && ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
......@@ -847,10 +848,14 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
static void pdc_eng_timeout(struct ata_port *ap)
{
u8 drv_stat;
struct ata_host_set *host_set = ap->host_set;
struct ata_queued_cmd *qc;
unsigned long flags;
DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!qc) {
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
......@@ -884,6 +889,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
}
out:
spin_unlock_irqrestore(&host_set->lock, flags);
DPRINTK("EXIT\n");
}
......
......@@ -173,7 +173,8 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
struct ata_port *ap;
ap = host_set->ports[i];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap && !(ap->flags &
+ (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
......
......@@ -108,6 +108,8 @@ enum {
/* ATA device commands */
ATA_CMD_CHK_POWER = 0xE5, /* check power mode */
ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
ATA_CMD_EDD = 0x90, /* execute device diagnostic */
ATA_CMD_FLUSH = 0xE7,
ATA_CMD_FLUSH_EXT = 0xEA,
......
......@@ -113,6 +113,8 @@ enum {
ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */
ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
* proper HSM is in place. */
ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */
ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
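ATA_FLAG_NOINTR is the thread that ties this merge together: ata_qc_issue_prot() sets it before handing an ATAPI command to the packet task, atapi_packet_task() clears it under host_set->lock in the same critical section that sends the cdb (ata_poll_qc_complete() clears it for polled commands), and every interrupt handler changed above now skips ports that still carry it. A condensed sketch of that life cycle, pieced together from the hunks above rather than new code:

/* issue path (ata_qc_issue_prot): suppress the irq handler until the cdb is out */
ap->flags |= ATA_FLAG_NOINTR;
queue_work(ata_wq, &ap->packet_task);

/* packet task (atapi_packet_task): re-arm irq handling atomically with the cdb */
spin_lock_irqsave(&ap->host_set->lock, flags);
ap->flags &= ~ATA_FLAG_NOINTR;
ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
	ap->ops->bmdma_start(qc);
spin_unlock_irqrestore(&ap->host_set->lock, flags);

/* interrupt handlers: ignore ports that are still marked NOINTR */
if (ap && !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))
	ata_host_intr(ap, qc);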
......@@ -363,7 +365,7 @@ struct ata_port_operations {
void (*host_stop) (struct ata_host_set *host_set);
- void (*bmdma_stop) (struct ata_port *ap);
+ void (*bmdma_stop) (struct ata_queued_cmd *qc);
u8 (*bmdma_status) (struct ata_port *ap);
};
......@@ -424,7 +426,7 @@ extern void ata_dev_id_string(u16 *id, unsigned char *s,
extern void ata_dev_config(struct ata_port *ap, unsigned int i);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
- extern void ata_bmdma_stop(struct ata_port *ap);
+ extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat);
......
......@@ -1249,6 +1249,7 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x036F
#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B
......