Commit 06e6295b authored by Stefano Stabellini

arm: make SWIOTLB available

IOMMU_HELPER is needed because SWIOTLB calls iommu_is_span_boundary,
provided by lib/iommu-helper.c.
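
For reference, the boundary check that forces the IOMMU_HELPER dependency is
small. The following is a user-space approximation of iommu_is_span_boundary()
for illustration only (the kernel version uses BUG_ON/is_power_of_2 rather
than assert); SWIOTLB calls it while scanning for free bounce-buffer slots so
that an allocated range never straddles a device's DMA segment boundary:

/* Approximation of iommu_is_span_boundary() from lib/iommu-helper.c;
 * a sketch for illustration, not the kernel source verbatim. */
#include <assert.h>
#include <stdio.h>

static int iommu_is_span_boundary(unsigned int index, unsigned int nr,
				  unsigned long shift,
				  unsigned long boundary_size)
{
	/* boundary_size must be a power of two */
	assert(boundary_size && (boundary_size & (boundary_size - 1)) == 0);

	/* offset of slot 'index' within its boundary-sized window */
	shift = (shift + index) & (boundary_size - 1);

	/* nonzero if [index, index + nr) spills into the next window */
	return shift + nr > boundary_size;
}

int main(void)
{
	printf("%d\n", iommu_is_span_boundary(60, 8, 0, 64)); /* 1: crosses */
	printf("%d\n", iommu_is_span_boundary(60, 4, 0, 64)); /* 0: fits */
	return 0;
}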
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: will.deacon@arm.com
CC: linux@arm.linux.org.uk


Changes in v8:
- use __phys_to_pfn and __pfn_to_phys.

Changes in v7:
- dma_mark_clean: empty implementation;
- in dma_capable use coherent_dma_mask if dma_mask hasn't been
  allocated.

Changes in v6:
- check for dev->dma_mask being NULL in dma_capable.

Changes in v5:
- implement dma_mark_clean using dmac_flush_range.

Changes in v3:
- dma_capable: do not treat dma_mask as a limit;
- remove SWIOTLB dependency on NEED_SG_DMA_LENGTH.
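
The dma_capable() behaviour described in the v3, v6, and v7 notes above is
easiest to see with concrete masks. The snippet below is an illustrative
user-space check of the limit computation in the dma-mapping.h hunk further
down; it is not part of the patch:

/* Illustrative check of dma_capable()'s limit computation,
 * limit = (mask + 1) & ~mask; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask32 = 0xffffffffULL; /* 32-bit DMA mask */
	uint64_t mask64 = ~0ULL;         /* full 64-bit DMA mask */

	/* 32-bit mask: limit is 4 GiB, so an oversized transfer fails */
	printf("limit32 = %#llx\n",
	       (unsigned long long)((mask32 + 1) & ~mask32)); /* 0x100000000 */

	/* Full 64-bit mask: mask + 1 wraps to 0, so limit is 0 and the
	 * size check is skipped -- the mask bounds which addresses the
	 * device can reach, not how large a transfer may be (the v3
	 * note above). */
	printf("limit64 = %#llx\n",
	       (unsigned long long)((mask64 + 1) & ~mask64)); /* 0 */
	return 0;
}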
parent 61e6cfa8
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1872,6 +1872,12 @@ config CC_STACKPROTECTOR
 	  neutralized via a kernel panic.
 	  This feature requires gcc version 4.2 or above.
 
+config SWIOTLB
+	def_bool y
+
+config IOMMU_HELPER
+	def_bool SWIOTLB
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -10,6 +10,7 @@
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
+#include <asm/cacheflush.h>
 
 #define DMA_ERROR_CODE	(~0)
 extern struct dma_map_ops arm_dma_ops;
@@ -86,6 +87,42 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	unsigned int offset = paddr & ~PAGE_MASK;
+	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+	unsigned int offset = dev_addr & ~PAGE_MASK;
+	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	u64 limit, mask;
+
+	if (dev->dma_mask)
+		mask = *dev->dma_mask;
+	else
+		mask = dev->coherent_dma_mask;
+
+	if (mask == 0)
+		return 0;
+
+	limit = (mask + 1) & ~mask;
+	if (limit && size > limit)
+		return 0;
+
+	if ((addr | (addr + size - 1)) & ~mask)
+		return 0;
+
+	return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
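
To see how SWIOTLB consumes the helpers added above, here is a simplified
sketch of its map-page decision, modelled loosely on lib/swiotlb.c of this
era. The control flow is abbreviated and swiotlb_bounce() is a hypothetical
stand-in for the real slot allocation and copy:

/* Sketch only: how the new helpers plug into SWIOTLB's map path.
 * Abbreviated and not the kernel source. */
static dma_addr_t sketch_map_page(struct device *dev, phys_addr_t paddr,
				  size_t size)
{
	dma_addr_t dev_addr = phys_to_dma(dev, paddr);

	/* The device can reach the buffer directly: no bounce needed. */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	/* Otherwise copy through the SWIOTLB pool, which sits low enough
	 * in memory to be reachable, and hand the device the pool's
	 * address instead. swiotlb_bounce() is a hypothetical name. */
	paddr = swiotlb_bounce(dev, paddr, size);
	return phys_to_dma(dev, paddr);
}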