Commit a0f2dee0 authored by Stefano Stabellini, committed by David Vrabel

xen: add a dma_addr_t dev_addr argument to xen_dma_map_page

dev_addr is the machine address of the page.

The new parameter can be used by the ARM and ARM64 implementations of
xen_dma_map_page to find out if the page is a local page (pfn == mfn) or
a foreign page (pfn != mfn).

dev_addr could be retrieved again from the physical address using
pfn_to_mfn, but that requires accessing an rbtree. Since we already have
dev_addr in hand at the call site, there is no need to look up the mfn
twice.
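
For illustration only (not part of this patch), a minimal sketch of how an
ARM-side implementation could use the new argument to tell the two cases
apart; the __xen_dma_map_page() helper named below is an assumption of the
sketch, and the real ARM implementation is introduced separately:

/*
 * Hypothetical sketch, not part of this patch: one way an ARM
 * implementation could use the new dev_addr argument (the machine
 * address of the page). __xen_dma_map_page() is an assumed helper.
 */
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long mfn = PFN_DOWN(dev_addr);

	if (pfn == mfn)
		/* local page (pfn == mfn): native dma_ops can handle it */
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size,
						   dir, attrs);
	else
		/* foreign page (pfn != mfn): assumed helper that does the
		 * cache maintenance by machine address */
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size,
				   dir, attrs);
}

Note how dev_addr makes the local/foreign check a simple comparison, with
no second pfn_to_mfn lookup.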
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 2e2a7817
@@ -29,8 +29,8 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
...
@@ -20,8 +20,8 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 }
...
@@ -22,8 +22,8 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs) { }
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs) { }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
...
@@ -403,7 +403,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		/* we are not interested in the dma_addr returned by
 		 * xen_dma_map_page, only in the potential cache flushes executed
 		 * by the function. */
-		xen_dma_map_page(dev, page, offset, size, dir, attrs);
+		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
 		return dev_addr;
 	}
@@ -417,7 +417,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-			 map & ~PAGE_MASK, size, dir, attrs);
+			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 	dev_addr = xen_phys_to_bus(map);
 
 	/*
@@ -574,6 +574,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 				return 0;
 			}
 			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
+						dev_addr,
 						map & ~PAGE_MASK,
 						sg->length,
 						dir,
@@ -584,6 +585,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 			 * xen_dma_map_page, only in the potential cache flushes executed
 			 * by the function. */
 			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+						dev_addr,
 						paddr & ~PAGE_MASK,
 						sg->length,
 						dir,
...