Commit 80885468 authored by Christoph Hellwig

swiotlb: do not panic on mapping failures

All properly written drivers now have error handling in the
dma_map_single / dma_map_page callers.  As swiotlb_tbl_map_single already
prints a useful warning when running out of swiotlb pool space, we can
also remove swiotlb_full entirely, as it serves no purpose now.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent b65125c6
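
For reference, a minimal sketch of the error handling the commit message relies on: a hypothetical driver mapping path (names and error code invented, not part of this commit) that checks dma_mapping_error() after dma_map_single() and backs off on failure, rather than depending on swiotlb warning or panicking on its behalf.

#include <linux/dma-mapping.h>

/* Hypothetical driver mapping path: check the mapping result instead of
 * relying on swiotlb_full() to warn or panic when the pool is exhausted. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* mapping failed, e.g. no bounce-buffer space */

	*handle = addr;
	return 0;
}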
@@ -761,34 +761,6 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 	return true;
 }
 
-static void
-swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
-	     int do_panic)
-{
-	if (swiotlb_force == SWIOTLB_NO_FORCE)
-		return;
-
-	/*
-	 * Ran out of IOMMU space for this operation. This is very bad.
-	 * Unfortunately the drivers cannot handle this operation properly.
-	 * unless they check for dma_mapping_error (most don't)
-	 * When the mapping is small enough return a static buffer to limit
-	 * the damage, or panic when the transfer is too big.
-	 */
-	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
-			    size);
-
-	if (size <= io_tlb_overflow || !do_panic)
-		return;
-
-	if (dir == DMA_BIDIRECTIONAL)
-		panic("DMA: Random memory could be DMA accessed\n");
-	if (dir == DMA_FROM_DEVICE)
-		panic("DMA: Random memory could be DMA written\n");
-	if (dir == DMA_TO_DEVICE)
-		panic("DMA: Random memory could be DMA read\n");
-}
-
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.  The
  * physical address to use is returned.
@@ -817,10 +789,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 
 	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir, attrs);
-	if (map == SWIOTLB_MAP_ERROR) {
-		swiotlb_full(dev, size, dir, 1);
+	if (map == SWIOTLB_MAP_ERROR)
 		return __phys_to_dma(dev, io_tlb_overflow_buffer);
-	}
 
 	dev_addr = __phys_to_dma(dev, map);
@@ -948,7 +918,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		if (map == SWIOTLB_MAP_ERROR) {
 			/* Don't panic here, we expect map_sg users
 			   to do proper error handling. */
-			swiotlb_full(hwdev, sg->length, dir, 0);
 			attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 			swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 					attrs);
...
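
The last hunk keeps the comment that map_sg users are expected to do their own error handling. As a hedged illustration (the function name is invented, not part of this commit), such a dma_map_sg() caller treats a return value of 0 as failure and unwinds instead of expecting the DMA layer to panic:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical dma_map_sg() caller: a return of 0 means nothing was
 * mapped (for example the swiotlb pool ran dry), so fail the request
 * cleanly rather than handing the device an unmapped scatterlist. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);

	if (!mapped)
		return -EIO;

	/* ... program the hardware with the mapped segments ... */

	dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	return 0;
}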