Commit efcb3cf7 authored by Tejun Heo, committed by Linus Torvalds

libata: use WARN_ON_ONCE on hot paths

Convert WARN_ON() on command issue/completion paths to WARN_ON_ONCE()
so that libata doesn't spam the machine even when one of those
conditions triggers repeatedly.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 43529c97
...@@ -4556,7 +4556,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc) ...@@ -4556,7 +4556,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
struct scatterlist *sg = qc->sg; struct scatterlist *sg = qc->sg;
int dir = qc->dma_dir; int dir = qc->dma_dir;
WARN_ON(sg == NULL); WARN_ON_ONCE(sg == NULL);
VPRINTK("unmapping %u sg elements\n", qc->n_elem); VPRINTK("unmapping %u sg elements\n", qc->n_elem);
...@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) ...@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
unsigned int tag; unsigned int tag;
WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
qc->flags = 0; qc->flags = 0;
tag = qc->tag; tag = qc->tag;
...@@ -4791,8 +4791,8 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) ...@@ -4791,8 +4791,8 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
struct ata_link *link = qc->dev->link; struct ata_link *link = qc->dev->link;
WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
ata_sg_clean(qc); ata_sg_clean(qc);
...@@ -4878,7 +4878,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) ...@@ -4878,7 +4878,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev; struct ata_device *dev = qc->dev;
struct ata_eh_info *ehi = &dev->link->eh_info; struct ata_eh_info *ehi = &dev->link->eh_info;
WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
if (unlikely(qc->err_mask)) if (unlikely(qc->err_mask))
qc->flags |= ATA_QCFLAG_FAILED; qc->flags |= ATA_QCFLAG_FAILED;
...@@ -5000,16 +5000,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc) ...@@ -5000,16 +5000,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
* check is skipped for old EH because it reuses active qc to * check is skipped for old EH because it reuses active qc to
* request ATAPI sense. * request ATAPI sense.
*/ */
WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
if (ata_is_ncq(prot)) { if (ata_is_ncq(prot)) {
WARN_ON(link->sactive & (1 << qc->tag)); WARN_ON_ONCE(link->sactive & (1 << qc->tag));
if (!link->sactive) if (!link->sactive)
ap->nr_active_links++; ap->nr_active_links++;
link->sactive |= 1 << qc->tag; link->sactive |= 1 << qc->tag;
} else { } else {
WARN_ON(link->sactive); WARN_ON_ONCE(link->sactive);
ap->nr_active_links++; ap->nr_active_links++;
link->active_tag = qc->tag; link->active_tag = qc->tag;
......
...@@ -578,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) ...@@ -578,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
} }
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
WARN_ON(!ioaddr->ctl_addr); WARN_ON_ONCE(!ioaddr->ctl_addr);
iowrite8(tf->hob_feature, ioaddr->feature_addr); iowrite8(tf->hob_feature, ioaddr->feature_addr);
iowrite8(tf->hob_nsect, ioaddr->nsect_addr); iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
iowrite8(tf->hob_lbal, ioaddr->lbal_addr); iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
...@@ -651,7 +651,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) ...@@ -651,7 +651,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
iowrite8(tf->ctl, ioaddr->ctl_addr); iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl; ap->last_ctl = tf->ctl;
} else } else
WARN_ON(1); WARN_ON_ONCE(1);
} }
} }
EXPORT_SYMBOL_GPL(ata_sff_tf_read); EXPORT_SYMBOL_GPL(ata_sff_tf_read);
...@@ -891,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) ...@@ -891,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
/* READ/WRITE MULTIPLE */ /* READ/WRITE MULTIPLE */
unsigned int nsect; unsigned int nsect;
WARN_ON(qc->dev->multi_count == 0); WARN_ON_ONCE(qc->dev->multi_count == 0);
nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
qc->dev->multi_count); qc->dev->multi_count);
...@@ -918,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) ...@@ -918,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{ {
/* send SCSI cdb */ /* send SCSI cdb */
DPRINTK("send cdb\n"); DPRINTK("send cdb\n");
WARN_ON(qc->dev->cdb_len < 12); WARN_ON_ONCE(qc->dev->cdb_len < 12);
ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
ata_sff_sync(ap); ata_sff_sync(ap);
...@@ -1014,7 +1014,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) ...@@ -1014,7 +1014,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
} }
/* consumed can be larger than count only for the last transfer */ /* consumed can be larger than count only for the last transfer */
WARN_ON(qc->cursg && count != consumed); WARN_ON_ONCE(qc->cursg && count != consumed);
if (bytes) if (bytes)
goto next_sg; goto next_sg;
...@@ -1172,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, ...@@ -1172,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
unsigned long flags = 0; unsigned long flags = 0;
int poll_next; int poll_next;
WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
/* Make sure ata_sff_qc_issue() does not throw things /* Make sure ata_sff_qc_issue() does not throw things
* like DMA polling into the workqueue. Notice that * like DMA polling into the workqueue. Notice that
* in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
*/ */
WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
fsm_start: fsm_start:
DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
...@@ -1387,7 +1387,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, ...@@ -1387,7 +1387,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
ap->print_id, qc->dev->devno, status); ap->print_id, qc->dev->devno, status);
WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
ap->hsm_task_state = HSM_ST_IDLE; ap->hsm_task_state = HSM_ST_IDLE;
...@@ -1423,7 +1423,7 @@ void ata_pio_task(struct work_struct *work) ...@@ -1423,7 +1423,7 @@ void ata_pio_task(struct work_struct *work)
int poll_next; int poll_next;
fsm_start: fsm_start:
WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
/* /*
* This is purely heuristic. This is a fast path. * This is purely heuristic. This is a fast path.
...@@ -1512,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ...@@ -1512,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
break; break;
case ATA_PROT_DMA: case ATA_PROT_DMA:
WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_setup(qc); /* set up bmdma */ ap->ops->bmdma_setup(qc); /* set up bmdma */
...@@ -1564,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ...@@ -1564,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
break; break;
case ATAPI_PROT_DMA: case ATAPI_PROT_DMA:
WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_setup(qc); /* set up bmdma */ ap->ops->bmdma_setup(qc); /* set up bmdma */
...@@ -1576,7 +1576,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ...@@ -1576,7 +1576,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
break; break;
default: default:
WARN_ON(1); WARN_ON_ONCE(1);
return AC_ERR_SYSTEM; return AC_ERR_SYSTEM;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment