Commit b312bf35 authored by Linus Torvalds

Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  AHCI: Remove an unnecessary flush from ahci_qc_issue
  AHCI: speed up resume
  [libata] Add support for VPD page b1
  ata: endianness annotations in pata drivers
  libata-eh: update atapi_eh_request_sense() to take @dev instead of @qc
  [libata] sata_svw: update code comments relating to data corruption
  libata/ahci: enclosure management support
  libata: improve EH internal command timeout handling
  libata: use ULONG_MAX to terminate reset timeout table
  libata: improve EH retry delay handling
  libata: consistently use msecs for time durations
parents dc221eae 2640d7c0
@@ -54,7 +54,6 @@
 #include <linux/completion.h>
 #include <linux/suspend.h>
 #include <linux/workqueue.h>
-#include <linux/jiffies.h>
 #include <linux/scatterlist.h>
 #include <linux/io.h>
 #include <scsi/scsi.h>
@@ -145,7 +144,7 @@ static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CF
 module_param_named(dma, libata_dma_mask, int, 0444);
 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
-static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
+static int ata_probe_timeout;
 module_param(ata_probe_timeout, int, 0444);
 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
@@ -1533,7 +1532,7 @@ unsigned long ata_id_xfermask(const u16 *id)
  * @ap: The ata_port to queue port_task for
  * @fn: workqueue function to be scheduled
  * @data: data for @fn to use
- * @delay: delay time for workqueue function
+ * @delay: delay time in msecs for workqueue function
  *
  * Schedule @fn(@data) for execution after @delay jiffies using
  * port_task.  There is one port_task per port and it's the
@@ -1552,7 +1551,7 @@ void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
 	ap->port_task_data = data;
 	/* may fail if ata_port_flush_task() in progress */
-	queue_delayed_work(ata_wq, &ap->port_task, delay);
+	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
 }
 /**
@@ -1612,6 +1611,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	struct ata_link *link = dev->link;
 	struct ata_port *ap = link->ap;
 	u8 command = tf->command;
+	int auto_timeout = 0;
 	struct ata_queued_cmd *qc;
 	unsigned int tag, preempted_tag;
 	u32 preempted_sactive, preempted_qc_active;
@@ -1684,8 +1684,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	spin_unlock_irqrestore(ap->lock, flags);
-	if (!timeout)
-		timeout = ata_probe_timeout * 1000 / HZ;
+	if (!timeout) {
+		if (ata_probe_timeout)
+			timeout = ata_probe_timeout * 1000;
+		else {
+			timeout = ata_internal_cmd_timeout(dev, command);
+			auto_timeout = 1;
+		}
+	}
 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
@@ -1761,6 +1767,9 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	spin_unlock_irqrestore(ap->lock, flags);
+	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
+		ata_internal_cmd_timed_out(dev, command);
 	return err_mask;
 }
@@ -3319,7 +3328,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
 		   int (*check_ready)(struct ata_link *link))
 {
 	unsigned long start = jiffies;
-	unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
+	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
 	int warned = 0;
 	if (time_after(nodev_deadline, deadline))
@@ -3387,7 +3396,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
 			 int (*check_ready)(struct ata_link *link))
 {
-	msleep(ATA_WAIT_AFTER_RESET_MSECS);
+	msleep(ATA_WAIT_AFTER_RESET);
 	return ata_wait_ready(link, deadline, check_ready);
 }
@@ -3417,13 +3426,13 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
 		       unsigned long deadline)
 {
-	unsigned long interval_msec = params[0];
-	unsigned long duration = msecs_to_jiffies(params[1]);
+	unsigned long interval = params[0];
+	unsigned long duration = params[1];
 	unsigned long last_jiffies, t;
 	u32 last, cur;
 	int rc;
-	t = jiffies + msecs_to_jiffies(params[2]);
+	t = ata_deadline(jiffies, params[2]);
 	if (time_before(t, deadline))
 		deadline = t;
@@ -3435,7 +3444,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
 	last_jiffies = jiffies;
 	while (1) {
-		msleep(interval_msec);
+		msleep(interval);
 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
 			return rc;
 		cur &= 0xf;
@@ -3444,7 +3453,8 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
 		if (cur == last) {
 			if (cur == 1 && time_before(jiffies, deadline))
 				continue;
-			if (time_after(jiffies, last_jiffies + duration))
+			if (time_after(jiffies,
+				       ata_deadline(last_jiffies, duration)))
 				return 0;
 			continue;
 		}
@@ -3636,7 +3646,8 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
 	if (check_ready) {
 		unsigned long pmp_deadline;
-		pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
+		pmp_deadline = ata_deadline(jiffies,
+					    ATA_TMOUT_PMP_SRST_WAIT);
 		if (time_after(pmp_deadline, deadline))
 			pmp_deadline = deadline;
 		ata_wait_ready(link, pmp_deadline, check_ready);
@@ -6073,8 +6084,6 @@ static void __init ata_parse_force_param(void)
 static int __init ata_init(void)
 {
-	ata_probe_timeout *= HZ;
 	ata_parse_force_param();
 	ata_wq = create_workqueue("ata");
@@ -6127,8 +6136,8 @@ int ata_ratelimit(void)
  * @reg: IO-mapped register
  * @mask: Mask to apply to read register value
  * @val: Wait condition
- * @interval_msec: polling interval in milliseconds
- * @timeout_msec: timeout in milliseconds
+ * @interval: polling interval in milliseconds
+ * @timeout: timeout in milliseconds
  *
  * Waiting for some bits of register to change is a common
  * operation for ATA controllers.  This function reads 32bit LE
@@ -6146,10 +6155,9 @@ int ata_ratelimit(void)
  * The final register value.
  */
 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
-		      unsigned long interval_msec,
-		      unsigned long timeout_msec)
+		      unsigned long interval, unsigned long timeout)
 {
-	unsigned long timeout;
+	unsigned long deadline;
 	u32 tmp;
 	tmp = ioread32(reg);
@@ -6158,10 +6166,10 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 	 * preceding writes reach the controller before starting to
 	 * eat away the timeout.
 	 */
-	timeout = jiffies + (timeout_msec * HZ) / 1000;
-	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
-		msleep(interval_msec);
+	deadline = ata_deadline(jiffies, timeout);
+	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
+		msleep(interval);
 		tmp = ioread32(reg);
 	}
......
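The ata_wait_register() conversion above changes its last two arguments from jiffies-derived values to plain milliseconds. A minimal caller sketch of the new msec-based interface follows; this is hypothetical driver code, not part of this commit, and the register offset and bit are made up for illustration.

#include <linux/libata.h>

/* poll a (made-up) status register until an engine-busy bit clears */
static int example_wait_engine_idle(void __iomem *port_mmio)
{
	u32 tmp;

	/* interval and timeout are now plain msecs: poll every 1 ms, up to 500 ms */
	tmp = ata_wait_register(port_mmio + 0x18, 0x1, 0x1, 1, 500);
	if (tmp & 0x1)
		return -EBUSY;	/* bit was still set when the timeout expired */
	return 0;
}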
@@ -727,19 +727,12 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
 	}
 	if (tries) {
-		int sleep = ehc->i.flags & ATA_EHI_DID_RESET;
 		/* consecutive revalidation failures? speed down */
 		if (reval_failed)
 			sata_down_spd_limit(link);
 		else
 			reval_failed = 1;
-		ata_dev_printk(dev, KERN_WARNING,
-			       "retrying reset%s\n",
-			       sleep ? " in 5 secs" : "");
-		if (sleep)
-			ssleep(5);
 		ehc->i.action |= ATA_EH_RESET;
 		goto retry;
 	} else {
@@ -785,7 +778,8 @@ static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
 		 * SError.N working.
 		 */
 		sata_link_hardreset(link, sata_deb_timing_normal,
-				jiffies + ATA_TMOUT_INTERNAL_QUICK, NULL, NULL);
+				ata_deadline(jiffies, ATA_TMOUT_INTERNAL_QUICK),
+				NULL, NULL);
 		/* unconditionally clear SError.N */
 		rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
@@ -990,10 +984,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
 		goto retry;
 	if (--pmp_tries) {
-		ata_port_printk(ap, KERN_WARNING,
-				"failed to recover PMP, retrying in 5 secs\n");
 		pmp_ehc->i.action |= ATA_EH_RESET;
-		ssleep(5);
 		goto retry;
 	}
......
@@ -190,6 +190,85 @@ static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
+static ssize_t
+ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
+		return ap->ops->em_store(ap, buf, count);
+	return -EINVAL;
+}
+static ssize_t
+ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
+		return ap->ops->em_show(ap, buf);
+	return -EINVAL;
+}
+DEVICE_ATTR(em_message, S_IRUGO | S_IWUGO,
+	    ata_scsi_em_message_show, ata_scsi_em_message_store);
+EXPORT_SYMBOL_GPL(dev_attr_em_message);
+static ssize_t
+ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	return snprintf(buf, 23, "%d\n", ap->em_message_type);
+}
+DEVICE_ATTR(em_message_type, S_IRUGO,
+	    ata_scsi_em_message_type_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
+static ssize_t
+ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+	if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
+		return ap->ops->sw_activity_show(atadev, buf);
+	return -EINVAL;
+}
+static ssize_t
+ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+	enum sw_activity val;
+	int rc;
+	if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+		val = simple_strtoul(buf, NULL, 0);
+		switch (val) {
+		case OFF: case BLINK_ON: case BLINK_OFF:
+			rc = ap->ops->sw_activity_store(atadev, val);
+			if (!rc)
+				return count;
+			else
+				return rc;
+		}
+	}
+	return -EINVAL;
+}
+DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
+	    ata_scsi_activity_store);
+EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
 static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
 				   void (*done)(struct scsi_cmnd *))
 {
@@ -1779,7 +1858,9 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
 	const u8 pages[] = {
 		0x00,	/* page 0x00, this page */
 		0x80,	/* page 0x80, unit serial no page */
-		0x83	/* page 0x83, device ident page */
+		0x83,	/* page 0x83, device ident page */
+		0x89,	/* page 0x89, ata info page */
+		0xb1,	/* page 0xb1, block device characteristics page */
 	};
 	rbuf[3] = sizeof(pages);	/* number of supported VPD pages */
@@ -1900,6 +1981,19 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
 	return 0;
 }
+static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+{
+	rbuf[1] = 0xb1;
+	rbuf[3] = 0x3c;
+	if (ata_id_major_version(args->id) > 7) {
+		rbuf[4] = args->id[217] >> 8;
+		rbuf[5] = args->id[217];
+		rbuf[7] = args->id[168] & 0xf;
+	}
+	return 0;
+}
 /**
  *	ata_scsiop_noop - Command handler that simply returns success.
  *	@args: device IDENTIFY data / SCSI command of interest.
@@ -2921,6 +3015,9 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 		case 0x89:
 			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
 			break;
+		case 0xb1:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
+			break;
 		default:
 			ata_scsi_invalid_field(cmd, done);
 			break;
......
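The new ata_scsiop_inq_b1() builds the SCSI Block Device Characteristics VPD page from IDENTIFY DEVICE data: word 217 (nominal media rotation rate) goes into bytes 4-5 and the low nibble of word 168 (form factor) into byte 7. A small user-space sketch, for illustration only and not from this commit, of how a consumer of that page would interpret those fields:

#include <stdint.h>
#include <stdio.h>

/* rbuf points at the 0x3c + 4 byte page returned for INQUIRY EVPD page 0xb1 */
static void decode_vpd_b1(const uint8_t *rbuf)
{
	uint16_t rate = (rbuf[4] << 8) | rbuf[5];	/* IDENTIFY word 217 */
	uint8_t form_factor = rbuf[7] & 0x0f;		/* IDENTIFY word 168, bits 3:0 */

	if (rate == 0x0001)
		printf("non-rotating medium (solid state)\n");
	else if (rate >= 0x0401 && rate <= 0xfffe)
		printf("nominal rotation rate: %u rpm\n", rate);
	else
		printf("rotation rate not reported\n");

	printf("form factor code: %u\n", form_factor);
}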
@@ -345,8 +345,8 @@ void ata_sff_dma_pause(struct ata_port *ap)
 /**
  *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
  *	@ap: port containing status register to be polled
- *	@tmout_pat: impatience timeout
- *	@tmout: overall timeout
+ *	@tmout_pat: impatience timeout in msecs
+ *	@tmout: overall timeout in msecs
  *
  *	Sleep until ATA Status register bit BSY clears,
  *	or a timeout occurs.
@@ -365,7 +365,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
 	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
 	timer_start = jiffies;
-	timeout = timer_start + tmout_pat;
+	timeout = ata_deadline(timer_start, tmout_pat);
 	while (status != 0xff && (status & ATA_BUSY) &&
 	       time_before(jiffies, timeout)) {
 		msleep(50);
@@ -377,7 +377,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
 				"port is slow to respond, please be patient "
 				"(Status 0x%x)\n", status);
-	timeout = timer_start + tmout;
+	timeout = ata_deadline(timer_start, tmout);
 	while (status != 0xff && (status & ATA_BUSY) &&
 	       time_before(jiffies, timeout)) {
 		msleep(50);
@@ -390,7 +390,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
 	if (status & ATA_BUSY) {
 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
 				"(%lu secs, Status 0x%x)\n",
-				tmout / HZ, status);
+				DIV_ROUND_UP(tmout, 1000), status);
 		return -EBUSY;
 	}
@@ -1888,7 +1888,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
 	unsigned int dev1 = devmask & (1 << 1);
 	int rc, ret = 0;
-	msleep(ATA_WAIT_AFTER_RESET_MSECS);
+	msleep(ATA_WAIT_AFTER_RESET);
 	/* always check readiness of the master device */
 	rc = ata_sff_wait_ready(link, deadline);
@@ -2371,7 +2371,8 @@ void ata_bus_reset(struct ata_port *ap)
 	/* issue bus reset */
 	if (ap->flags & ATA_FLAG_SRST) {
-		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
+		rc = ata_bus_softreset(ap, devmask,
+				       ata_deadline(jiffies, 40000));
 		if (rc && rc != -ENODEV)
 			goto err_out;
 	}
......
@@ -151,6 +151,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 /* libata-eh.c */
+extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
+extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
 extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
 extern void ata_port_wait_eh(struct ata_port *ap);
......
@@ -1011,7 +1011,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	unsigned int dev0 = devmask & (1 << 0);
 	unsigned int dev1 = devmask & (1 << 1);
-	unsigned long timeout;
+	unsigned long deadline;
 	/* if device 0 was found in ata_devchk, wait for its
 	 * BSY bit to clear
@@ -1022,7 +1022,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
 	/* if device 1 was found in ata_devchk, wait for
 	 * register access, then wait for BSY to clear
 	 */
-	timeout = jiffies + ATA_TMOUT_BOOT;
+	deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
 	while (dev1) {
 		u8 nsect, lbal;
@@ -1031,7 +1031,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
 		lbal = read_atapi_register(base, ATA_REG_LBAL);
 		if ((nsect == 1) && (lbal == 1))
 			break;
-		if (time_after(jiffies, timeout)) {
+		if (time_after(jiffies, deadline)) {
 			dev1 = 0;
 			break;
 		}
......
@@ -305,7 +305,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
 		iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 		if (unlikely(slop)) {
-			u32 pad;
+			__le32 pad;
 			if (rw == READ) {
 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
 				memcpy(buf + buflen - slop, &pad, slop);
@@ -746,14 +746,12 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
 		ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 		if (unlikely(slop)) {
-			u32 pad;
+			__le32 pad;
 			if (rw == WRITE) {
 				memcpy(&pad, buf + buflen - slop, slop);
-				pad = le32_to_cpu(pad);
-				iowrite32(pad, ap->ioaddr.data_addr);
+				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
 			} else {
-				pad = ioread32(ap->ioaddr.data_addr);
-				pad = cpu_to_le32(pad);
+				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
 				memcpy(buf + buflen - slop, &pad, slop);
 			}
 		}
......
@@ -137,7 +137,7 @@ static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
 		iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 		if (unlikely(slop)) {
-			u32 pad;
+			__le32 pad;
 			if (rw == READ) {
 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
 				memcpy(buf + buflen - slop, &pad, slop);
......
@@ -696,7 +696,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
 	if (reg & INTSTS_BMSINT) {
 		unsigned int classes;
-		unsigned long deadline = jiffies + ATA_TMOUT_BOOT;
+		unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
 		printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
 		out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
 		/* TBD: SW reset */
......
@@ -105,7 +105,7 @@ static unsigned int winbond_data_xfer(struct ata_device *dev,
 		iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 		if (unlikely(slop)) {
-			u32 pad;
+			__le32 pad;
 			if (rw == READ) {
 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
 				memcpy(buf + buflen - slop, &pad, slop);
......
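The endianness-annotation hunks above all apply the same change: the bounce word used for the sub-dword tail of a PIO transfer becomes __le32, with cpu_to_le32()/le32_to_cpu() at the data-register boundary so sparse can check the byte order. A condensed sketch of that tail-transfer pattern, distilled from the hunks rather than copied from any one driver, assuming the usual kernel I/O headers:

#include <linux/io.h>
#include <linux/string.h>

/* transfer the buflen % 4 trailing bytes through a 32-bit data register */
static void xfer_tail(void __iomem *data_addr, unsigned char *buf,
		      unsigned int buflen, int is_read)
{
	unsigned int slop = buflen & 3;
	__le32 pad;

	if (!slop)
		return;

	if (is_read) {
		/* drain one full word, keep only the bytes that belong to the buffer */
		pad = cpu_to_le32(ioread32(data_addr));
		memcpy(buf + buflen - slop, &pad, slop);
	} else {
		/* pad the partial word out to 32 bits before writing it */
		memcpy(&pad, buf + buflen - slop, slop);
		iowrite32(le32_to_cpu(pad), data_addr);
	}
}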
@@ -253,21 +253,29 @@ static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc)
 	/* start host DMA transaction */
 	dmactl = readb(mmio + ATA_DMA_CMD);
 	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
-	/* There is a race condition in certain SATA controllers that can
-	   be seen when the r/w command is given to the controller before the
-	   host DMA is started. On a Read command, the controller would initiate
-	   the command to the drive even before it sees the DMA start. When there
-	   are very fast drives connected to the controller, or when the data request
-	   hits in the drive cache, there is the possibility that the drive returns a part
-	   or all of the requested data to the controller before the DMA start is issued.
-	   In this case, the controller would become confused as to what to do with the data.
-	   In the worst case when all the data is returned back to the controller, the
-	   controller could hang. In other cases it could return partial data returning
-	   in data corruption. This problem has been seen in PPC systems and can also appear
-	   on an system with very fast disks, where the SATA controller is sitting behind a
-	   number of bridges, and hence there is significant latency between the r/w command
-	   and the start command. */
-	/* issue r/w command if the access is to ATA*/
+	/* This works around possible data corruption.
+
+	   On certain SATA controllers that can be seen when the r/w
+	   command is given to the controller before the host DMA is
+	   started.
+
+	   On a Read command, the controller would initiate the
+	   command to the drive even before it sees the DMA
+	   start. When there are very fast drives connected to the
+	   controller, or when the data request hits in the drive
+	   cache, there is the possibility that the drive returns a
+	   part or all of the requested data to the controller before
+	   the DMA start is issued.  In this case, the controller
+	   would become confused as to what to do with the data.  In
+	   the worst case when all the data is returned back to the
+	   controller, the controller could hang. In other cases it
+	   could return partial data returning in data
+	   corruption. This problem has been seen in PPC systems and
+	   can also appear on an system with very fast disks, where
+	   the SATA controller is sitting behind a number of bridges,
+	   and hence there is significant latency between the r/w
+	   command and the start command. */
+	/* issue r/w command if the access is to ATA */
 	if (qc->tf.protocol == ATA_PROT_DMA)
 		ap->ops->sff_exec_command(ap, &qc->tf);
 }
......
@@ -27,6 +27,7 @@
 #define __LINUX_LIBATA_H__
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
@@ -115,7 +116,7 @@ enum {
 	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
 	ATA_MAX_QUEUE		= 32,
 	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
-	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,
+	ATA_SHORT_PAUSE		= 16,
 	ATAPI_MAX_DRAIN		= 16 << 10,
@@ -168,6 +169,7 @@ enum {
 	ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
 	ATA_LFLAG_NO_RETRY	= (1 << 5), /* don't retry this link */
 	ATA_LFLAG_DISABLED	= (1 << 6), /* link is disabled */
+	ATA_LFLAG_SW_ACTIVITY	= (1 << 7), /* keep activity stats */
 	/* struct ata_port flags */
 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
@@ -190,6 +192,10 @@ enum {
 	ATA_FLAG_AN		= (1 << 18), /* controller supports AN */
 	ATA_FLAG_PMP		= (1 << 19), /* controller supports PMP */
 	ATA_FLAG_IPM		= (1 << 20), /* driver can handle IPM */
+	ATA_FLAG_EM		= (1 << 21), /* driver supports enclosure
+					      * management */
+	ATA_FLAG_SW_ACTIVITY	= (1 << 22), /* driver supports sw activity
+					      * led */
 	/* The following flag belongs to ap->pflags but is kept in
 	 * ap->flags because it's referenced in many LLDs and will be
@@ -234,17 +240,16 @@ enum {
 	/* bits 24:31 of host->flags are reserved for LLD specific flags */
 	/* various lengths of time */
-	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
-	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
-	ATA_TMOUT_INTERNAL	= 30 * HZ,
-	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
+	ATA_TMOUT_BOOT		= 30000,	/* heuristic */
+	ATA_TMOUT_BOOT_QUICK	= 7000,		/* heuristic */
+	ATA_TMOUT_INTERNAL_QUICK = 5000,
 	/* FIXME: GoVault needs 2s but we can't afford that without
 	 * parallel probing.  800ms is enough for iVDR disk
 	 * HHD424020F7SV00.  Increase to 2secs when parallel probing
 	 * is in place.
 	 */
-	ATA_TMOUT_FF_WAIT	= 4 * HZ / 5,
+	ATA_TMOUT_FF_WAIT	= 800,
 	/* Spec mandates to wait for ">= 2ms" before checking status
 	 * after reset.  We wait 150ms, because that was the magic
@@ -256,14 +261,14 @@ enum {
 	 *
 	 * Old drivers/ide uses the 2mS rule and then waits for ready.
 	 */
-	ATA_WAIT_AFTER_RESET_MSECS = 150,
+	ATA_WAIT_AFTER_RESET	= 150,
 	/* If PMP is supported, we have to do follow-up SRST.  As some
 	 * PMPs don't send D2H Reg FIS after hardreset, LLDs are
 	 * advised to wait only for the following duration before
 	 * doing SRST.
 	 */
-	ATA_TMOUT_PMP_SRST_WAIT	= 1 * HZ,
+	ATA_TMOUT_PMP_SRST_WAIT	= 1000,
 	/* ATA bus states */
 	BUS_UNKNOWN		= 0,
@@ -340,6 +345,11 @@ enum {
 	SATA_PMP_RW_TIMEOUT	= 3000,		/* PMP read/write timeout */
+	/* This should match the actual table size of
+	 * ata_eh_cmd_timeout_table in libata-eh.c.
+	 */
+	ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 5,
 	/* Horkage types.  May be set by libata or controller on drives
 	   (some horkage may be drive/controller pair dependant */
@@ -441,6 +451,15 @@ enum link_pm {
 	MEDIUM_POWER,
 };
 extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_em_message_type;
+extern struct device_attribute dev_attr_em_message;
+extern struct device_attribute dev_attr_sw_activity;
+
+enum sw_activity {
+	OFF,
+	BLINK_ON,
+	BLINK_OFF,
+};
 #ifdef CONFIG_ATA_SFF
 struct ata_ioports {
@@ -597,10 +616,14 @@ struct ata_eh_info {
 struct ata_eh_context {
 	struct ata_eh_info	i;
 	int			tries[ATA_MAX_DEVICES];
+	int			cmd_timeout_idx[ATA_MAX_DEVICES]
+					[ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
 	unsigned int		classes[ATA_MAX_DEVICES];
 	unsigned int		did_probe_mask;
 	unsigned int		saved_ncq_enabled;
 	u8			saved_xfer_mode[ATA_MAX_DEVICES];
+	/* timestamp for the last reset attempt or success */
+	unsigned long		last_reset;
 };
 struct ata_acpi_drive
@@ -692,6 +715,7 @@ struct ata_port {
 	struct timer_list	fastdrain_timer;
 	unsigned long		fastdrain_cnt;
+	int			em_message_type;
 	void			*private_data;
 #ifdef CONFIG_ATA_ACPI
@@ -783,6 +807,12 @@ struct ata_port_operations {
 	u8   (*bmdma_status)(struct ata_port *ap);
 #endif /* CONFIG_ATA_SFF */
+	ssize_t (*em_show)(struct ata_port *ap, char *buf);
+	ssize_t (*em_store)(struct ata_port *ap, const char *message,
+			    size_t size);
+	ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
+	ssize_t (*sw_activity_store)(struct ata_device *dev,
+				     enum sw_activity val);
 	/*
 	 * Obsolete
 	 */
@@ -895,8 +925,7 @@ extern void ata_host_resume(struct ata_host *host);
 #endif
 extern int ata_ratelimit(void);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
-			     unsigned long interval_msec,
-			     unsigned long timeout_msec);
+			     unsigned long interval, unsigned long timeout);
 extern int atapi_cmd_type(u8 opcode);
 extern void ata_tf_to_fis(const struct ata_taskfile *tf,
			  u8 pmp, int is_cmd, u8 *fis);
@@ -1389,6 +1418,12 @@ static inline int ata_check_ready(u8 status)
 	return 0;
 }
+static inline unsigned long ata_deadline(unsigned long from_jiffies,
+					 unsigned long timeout_msecs)
+{
+	return from_jiffies + msecs_to_jiffies(timeout_msecs);
+}
 /**************************************************************************
  * PMP - drivers/ata/libata-pmp.c
......
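The new ata_port_operations hooks and sysfs attributes above are what a low-level driver wires up to expose enclosure management and a software activity LED; per the shortlog, the in-tree user added by this merge is ahci. The following is a hypothetical wiring sketch only, with made-up my_* names and stub bodies, not code from this commit:

#include <linux/libata.h>

/* trivial stubs standing in for real LED/enclosure accessors */
static ssize_t my_em_show(struct ata_port *ap, char *buf)
{
	return sprintf(buf, "0\n");
}

static ssize_t my_em_store(struct ata_port *ap, const char *msg, size_t size)
{
	return size;		/* pretend the message was sent to the enclosure */
}

static ssize_t my_activity_show(struct ata_device *dev, char *buf)
{
	return sprintf(buf, "%d\n", OFF);
}

static ssize_t my_activity_store(struct ata_device *dev, enum sw_activity val)
{
	return 0;
}

/* em_message/em_message_type hang off the Scsi_Host, sw_activity off the device */
static struct device_attribute *my_shost_attrs[] = {
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	NULL
};

static struct device_attribute *my_sdev_attrs[] = {
	&dev_attr_sw_activity,
	NULL
};

static struct ata_port_operations my_port_ops = {
	.em_show		= my_em_show,
	.em_store		= my_em_store,
	.sw_activity_show	= my_activity_show,
	.sw_activity_store	= my_activity_store,
};

/* the matching ata_port_info would also set ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
 * and the driver's scsi_host_template would point its shost/sdev attribute
 * lists at the two arrays above */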