Commit 258c9fde authored by Christoph Hellwig, committed by Tejun Heo

sata_nv: don't use block layer bounce buffer

sata_nv sets the block bounce limit to the reduced DMA mask for ATAPI
devices, which means that the IOMMU or swiotlb already takes care of
the bounce buffering, so the block-layer bouncing can be removed.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent dc85ca57
...@@ -740,32 +740,16 @@ static int nv_adma_slave_config(struct scsi_device *sdev) ...@@ -740,32 +740,16 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
sdev1 = ap->host->ports[1]->link.device[0].sdev; sdev1 = ap->host->ports[1]->link.device[0].sdev;
if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
/** We have to set the DMA mask to 32-bit if either port is in /*
ATAPI mode, since they are on the same PCI device which is * We have to set the DMA mask to 32-bit if either port is in
used for DMA mapping. If we set the mask we also need to set * ATAPI mode, since they are on the same PCI device which is
the bounce limit on both ports to ensure that the block * used for DMA mapping. If either SCSI device is not allocated
layer doesn't feed addresses that cause DMA mapping to * yet, it's OK since that port will discover its correct
choke. If either SCSI device is not allocated yet, it's OK * setting when it does get allocated.
since that port will discover its correct setting when it */
does get allocated. rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
Note: Setting 32-bit mask should not fail. */
if (sdev0)
blk_queue_bounce_limit(sdev0->request_queue,
ATA_DMA_MASK);
if (sdev1)
blk_queue_bounce_limit(sdev1->request_queue,
ATA_DMA_MASK);
dma_set_mask(&pdev->dev, ATA_DMA_MASK);
} else { } else {
/** This shouldn't fail as it was set to this value before */ rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
dma_set_mask(&pdev->dev, pp->adma_dma_mask);
if (sdev0)
blk_queue_bounce_limit(sdev0->request_queue,
pp->adma_dma_mask);
if (sdev1)
blk_queue_bounce_limit(sdev1->request_queue,
pp->adma_dma_mask);
} }
blk_queue_segment_boundary(sdev->request_queue, segment_boundary); blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
...@@ -1131,12 +1115,11 @@ static int nv_adma_port_start(struct ata_port *ap) ...@@ -1131,12 +1115,11 @@ static int nv_adma_port_start(struct ata_port *ap)
VPRINTK("ENTER\n"); VPRINTK("ENTER\n");
/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and /*
pad buffers */ * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); * pad buffers.
if (rc) */
return rc; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) if (rc)
return rc; return rc;
...@@ -1156,13 +1139,16 @@ static int nv_adma_port_start(struct ata_port *ap) ...@@ -1156,13 +1139,16 @@ static int nv_adma_port_start(struct ata_port *ap)
pp->notifier_clear_block = pp->gen_block + pp->notifier_clear_block = pp->gen_block +
NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
/* Now that the legacy PRD and padding buffer are allocated we can /*
safely raise the DMA mask to allocate the CPB/APRD table. * Now that the legacy PRD and padding buffer are allocated we can
These are allowed to fail since we store the value that ends up * try to raise the DMA mask to allocate the CPB/APRD table.
being used to set as the bounce limit in slave_config later if */
needed. */ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (rc) {
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
}
pp->adma_dma_mask = *dev->dma_mask; pp->adma_dma_mask = *dev->dma_mask;
mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment