Commit c03ab37c authored by FUJITA Tomonori, committed by Linus Torvalds

intel-iommu sg chaining support

x86_64 defines ARCH_HAS_SG_CHAIN, so if IOMMU implementations don't
support sg chaining, we will get data corruption.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 358dd8ac
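
To make the failure mode described in the commit message concrete: with ARCH_HAS_SG_CHAIN, a scatterlist passed to a dma_map_sg() implementation is no longer guaranteed to be one contiguous array; an entry may be a chain link pointing at the next block of entries. Walking the list with sg++ or &sg[i] then strides over memory that does not hold real entries, which is the data corruption the message refers to. The sketch below is purely illustrative and not part of this commit (dump_sg_entries() is a made-up helper name); it contrasts the array-style walk with the for_each_sg() iterator that the patch converts the Intel IOMMU code to.

/*
 * Illustrative sketch only -- not from this commit.
 * dump_sg_entries() is a hypothetical helper; it exists only to show
 * the iteration pattern the patch switches to.
 */
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void dump_sg_entries(struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	int i;

	/*
	 * Broken once lists may be chained: "sglist[i]" assumes all
	 * nelems entries sit in one contiguous array, so the walk can
	 * land on a chain-link entry or run past the allocation:
	 *
	 *	for (i = 0; i < nelems; i++)
	 *		printk(KERN_INFO "len %u\n", sglist[i].length);
	 */

	/*
	 * Correct: for_each_sg() advances with sg_next(), which follows
	 * a chain link to the next block of entries when it meets one.
	 */
	for_each_sg(sglist, sg, nelems, i)
		printk(KERN_INFO "entry %d: length %u\n", i, sg->length);
}

Conceptually, for_each_sg(sglist, sg, nelems, i) expands to roughly "for (i = 0, sg = sglist; i < nelems; i++, sg = sg_next(sg))"; the only difference from the old loops is that sg_next() knows how to hop across a chain entry instead of blindly incrementing the pointer. That is why each converted function below keeps the caller's list as sglist and declares its own struct scatterlist *sg cursor for iteration.
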
@@ -1963,7 +1963,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	int nelems, int dir)
 {
 	int i;
@@ -1973,16 +1973,17 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 	struct iova *iova;
 	size_t size = 0;
 	void *addr;
+	struct scatterlist *sg;
 
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
 		return;
 
 	domain = find_domain(pdev);
 
-	iova = find_iova(&domain->iovad, IOVA_PFN(sg[0].dma_address));
+	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
 	if (!iova)
 		return;
-	for (i = 0; i < nelems; i++, sg++) {
+	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		size += aligned_size((u64)addr, sg->length);
 	}
@@ -2003,21 +2004,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
-	struct scatterlist *sg, int nelems, int dir)
+	struct scatterlist *sglist, int nelems, int dir)
 {
 	int i;
+	struct scatterlist *sg;
 
-	for (i = 0; i < nelems; i++) {
-		struct scatterlist *s = &sg[i];
-		BUG_ON(!s->page);
-		s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s));
-		s->dma_length = s->length;
+	for_each_sg(sglist, sg, nelems, i) {
+		BUG_ON(!sg->page);
+		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+		sg->dma_length = sg->length;
 	}
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 	int nelems, int dir)
 {
 	void *addr;
 	int i;
@@ -2028,18 +2029,18 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 	size_t offset = 0;
 	struct iova *iova = NULL;
 	int ret;
-	struct scatterlist *orig_sg = sg;
+	struct scatterlist *sg;
 	unsigned long start_addr;
 
 	BUG_ON(dir == DMA_NONE);
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-		return intel_nontranslate_map_sg(hwdev, sg, nelems, dir);
+		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
 	domain = get_valid_domain_for_dev(pdev);
 	if (!domain)
 		return 0;
 
-	for (i = 0; i < nelems; i++, sg++) {
+	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size += aligned_size((u64)addr, sg->length);
@@ -2047,7 +2048,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 
 	iova = __intel_alloc_iova(hwdev, domain, size);
 	if (!iova) {
-		orig_sg->dma_length = 0;
+		sglist->dma_length = 0;
 		return 0;
 	}
 
@@ -2063,8 +2064,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 
 	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
 	offset = 0;
-	sg = orig_sg;
-	for (i = 0; i < nelems; i++, sg++) {
+	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size = aligned_size((u64)addr, sg->length);
...