Commit 797a7568 authored by FUJITA Tomonori, committed by David S. Miller

sparc: use dma_map_page instead of dma_map_single

This patch converts dma_map_single and dma_unmap_single to use the
map_page and unmap_page operations respectively, and removes the
now-unnecessary map_single and unmap_single ops. map_page can be used
to implement map_single, but the opposite is not possible, so having
only map_page in struct dma_ops is enough.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Tested-by: Robert Reif <reif@earthlink.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2e8fcfe2
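To illustrate the direction of the conversion: a map_single-style helper can always be built on top of a map_page operation, because a kernel-virtual buffer is just a page plus an intra-page offset. The sketch below mirrors the new inline dma_map_single() wrapper in the first hunk of the diff; only the helper name is invented here, everything else is taken from the patch.

        /*
         * Sketch: map_single expressed in terms of map_page.  The buffer's
         * page and intra-page offset are recovered from the virtual address,
         * which is exactly what the new inline dma_map_single() below does.
         * The reverse is awkward in general: not every struct page is
         * guaranteed to have a kernel virtual address to hand to a
         * map_single-style op (e.g. highmem pages on other architectures).
         */
        static inline dma_addr_t sketch_map_single(struct device *dev,
                                                   void *cpu_addr, size_t size,
                                                   enum dma_data_direction dir)
        {
                return dma_ops->map_page(dev, virt_to_page(cpu_addr),
                                         (unsigned long)cpu_addr & ~PAGE_MASK,
                                         size, dir);
        }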
@@ -9,12 +9,12 @@ struct dma_ops {
 				dma_addr_t *dma_handle, gfp_t flag);
 	void (*free_coherent)(struct device *dev, size_t size,
 			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
+	dma_addr_t (*map_page)(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction direction);
+	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+			   size_t size,
+			   enum dma_data_direction direction);
 	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
 		      enum dma_data_direction direction);
 	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
@@ -51,29 +51,30 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 					size_t size,
 					enum dma_data_direction direction)
 {
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
+				 direction);
 }
 
 static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
 				    size_t size,
 				    enum dma_data_direction direction)
 {
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
+	dma_ops->unmap_page(dev, dma_addr, size, direction);
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      unsigned long offset, size_t size,
 				      enum dma_data_direction direction)
 {
-	return dma_ops->map_single(dev, page_address(page) + offset,
-				   size, direction);
+	return dma_ops->map_page(dev, page, offset, size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 				  size_t size,
 				  enum dma_data_direction direction)
 {
-	dma_ops->unmap_single(dev, dma_address, size, direction);
+	dma_ops->unmap_page(dev, dma_address, size, direction);
 }
 
 static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
@@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
-				    enum dma_data_direction direction)
+static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t sz,
+				  enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
 	if (unlikely(direction == DMA_NONE))
 		goto bad_no_ctx;
 
-	oaddr = (unsigned long)ptr;
+	oaddr = (unsigned long)(page_address(page) + offset);
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -472,8 +473,8 @@ static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
 			  vaddr, ctx, npages);
 }
 
-static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
-				size_t sz, enum dma_data_direction direction)
+static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+			      size_t sz, enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 static const struct dma_ops sun4u_dma_ops = {
 	.alloc_coherent = dma_4u_alloc_coherent,
 	.free_coherent = dma_4u_free_coherent,
-	.map_single = dma_4u_map_single,
-	.unmap_single = dma_4u_unmap_single,
+	.map_page = dma_4u_map_page,
+	.unmap_page = dma_4u_unmap_page,
 	.map_sg = dma_4u_map_sg,
 	.unmap_sg = dma_4u_unmap_sg,
 	.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
@@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
-				    enum dma_data_direction direction)
+static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t sz,
+				  enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	unsigned long flags, npages, oaddr;
@@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
-	oaddr = (unsigned long)ptr;
+	oaddr = (unsigned long)(page_address(page) + offset);
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -294,8 +295,8 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
 	return DMA_ERROR_CODE;
 }
 
-static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
-				size_t sz, enum dma_data_direction direction)
+static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+			      size_t sz, enum dma_data_direction direction)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
 static const struct dma_ops sun4v_dma_ops = {
 	.alloc_coherent = dma_4v_alloc_coherent,
 	.free_coherent = dma_4v_free_coherent,
-	.map_single = dma_4v_map_single,
-	.unmap_single = dma_4v_unmap_single,
+	.map_page = dma_4v_map_page,
+	.unmap_page = dma_4v_unmap_page,
 	.map_sg = dma_4v_map_sg,
 	.unmap_sg = dma_4v_unmap_sg,
 	.sync_single_for_cpu = dma_4v_sync_single_for_cpu,
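From a driver's point of view nothing changes: dma_map_single() and dma_map_page() keep their signatures and now simply funnel into the provider's single map_page/unmap_page pair. A caller-side sketch, with an illustrative device, buffer, and sizes (none of these names come from the patch; mapping-error checks omitted for brevity):

        #include <linux/dma-mapping.h>

        /* Hypothetical caller; "dev", "buf" and "pg" are illustrative parameters. */
        static void example_dma_usage(struct device *dev, void *buf, struct page *pg)
        {
                dma_addr_t dma1, dma2;

                /* previously went through dma_ops->map_single(); the inline
                 * wrapper now derives the page and offset and calls ->map_page */
                dma1 = dma_map_single(dev, buf, 512, DMA_TO_DEVICE);

                /* previously converted to page_address(pg) + offset for
                 * map_single(); now passed straight through to ->map_page */
                dma2 = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);

                /* both unmap paths now end up in ->unmap_page */
                dma_unmap_single(dev, dma1, 512, DMA_TO_DEVICE);
                dma_unmap_page(dev, dma2, PAGE_SIZE, DMA_FROM_DEVICE);
        }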