Commit 5299709d authored by Bart Van Assche, committed by Doug Ledford

treewide: Constify most dma_map_ops structures

Most dma_map_ops structures are never modified. Constify these
structures such that these can be write-protected. This patch
has been generated as follows:

git grep -l 'struct dma_map_ops' |
  xargs -d\\n sed -i \
    -e 's/struct dma_map_ops/const struct dma_map_ops/g' \
    -e 's/const struct dma_map_ops {/struct dma_map_ops {/g' \
    -e 's/^const struct dma_map_ops;$/struct dma_map_ops;/' \
    -e 's/const const struct dma_map_ops /const struct dma_map_ops /g';
sed -i -e 's/const \(struct dma_map_ops intel_dma_ops\)/\1/' \
  $(git grep -l 'struct dma_map_ops intel_dma_ops');
sed -i -e 's/const \(struct dma_map_ops dma_iommu_ops\)/\1/' \
  $(git grep -l 'struct dma_map_ops' | grep ^arch/powerpc);
sed -i -e '/^struct vmd_dev {$/,/^};$/ s/const \(struct dma_map_ops[[:blank:]]dma_ops;\)/\1/' \
       -e '/^static void vmd_setup_dma_ops/,/^}$/ s/const \(struct dma_map_ops \*dest\)/\1/' \
       -e 's/const \(struct dma_map_ops \*dest = \&vmd->dma_ops\)/\1/' \
    drivers/pci/host/*.c
sed -i -e '/^void __init pci_iommu_alloc(void)$/,/^}$/ s/dma_ops->/intel_dma_ops./' arch/ia64/kernel/pci-dma.c
sed -i -e 's/static const struct dma_map_ops sn_dma_ops/static struct dma_map_ops sn_dma_ops/' arch/ia64/sn/pci/pci_dma.c
sed -i -e 's/(const struct dma_map_ops \*)//' drivers/misc/mic/bus/vop_bus.c
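(For readers following along: the first -e expression does the blanket constification, and the follow-up expressions revert over-matches such as the struct definition itself and already-const lines that would otherwise read "const const". An illustrative run of the first two expressions against scratch input, not the real tree:

$ printf 'struct dma_map_ops {\nextern struct dma_map_ops *dma_ops;\n' |
    sed -e 's/struct dma_map_ops/const struct dma_map_ops/g' \
        -e 's/const struct dma_map_ops {/struct dma_map_ops {/g'
struct dma_map_ops {
extern const struct dma_map_ops *dma_ops;
)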
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: Russell King <linux@armlinux.org.uk>
Cc: x86@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 102c5ce0
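Why const helps, in brief: a const-qualified object with static storage duration is emitted into a read-only data section (.rodata, or .data.rel.ro in position-independent builds), so once those pages are write-protected a stray write through a dangling pointer faults loudly instead of silently retargeting DMA callbacks. A minimal standalone sketch of the effect (plain C for illustration only; example_ops and noop are made-up names, not from this patch):

#include <stdio.h>

struct ops {
	int (*op)(void);	/* stand-in for a dma_map_ops callback */
};

static int noop(void)
{
	return 0;
}

/* const + static initializer => placed in a read-only data section */
static const struct ops example_ops = { .op = noop };

int main(void)
{
	/* Reading through the const table is fine... */
	printf("op() = %d\n", example_ops.op());
	/*
	 * ...but a write such as the following is undefined behaviour
	 * and typically faults once the section is mapped read-only:
	 *   ((struct ops *)&example_ops)->op = NULL;
	 */
	return 0;
}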
 #ifndef _ALPHA_DMA_MAPPING_H
 #define _ALPHA_DMA_MAPPING_H
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return dma_ops;
 }
...
@@ -128,7 +128,7 @@ static int alpha_noop_supported(struct device *dev, u64 mask)
 	return mask < 0x00ffffffUL ? 0 : 1;
 }
-struct dma_map_ops alpha_noop_ops = {
+const struct dma_map_ops alpha_noop_ops = {
 	.alloc = alpha_noop_alloc_coherent,
 	.free = dma_noop_free_coherent,
 	.map_page = dma_noop_map_page,
@@ -137,5 +137,5 @@ struct dma_map_ops alpha_noop_ops = {
 	.dma_supported = alpha_noop_supported,
 };
-struct dma_map_ops *dma_ops = &alpha_noop_ops;
+const struct dma_map_ops *dma_ops = &alpha_noop_ops;
 EXPORT_SYMBOL(dma_ops);
@@ -939,7 +939,7 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
-struct dma_map_ops alpha_pci_ops = {
+const struct dma_map_ops alpha_pci_ops = {
 	.alloc = alpha_pci_alloc_coherent,
 	.free = alpha_pci_free_coherent,
 	.map_page = alpha_pci_map_page,
@@ -950,5 +950,5 @@ struct dma_map_ops alpha_pci_ops = {
 	.dma_supported = alpha_pci_supported,
 };
-struct dma_map_ops *dma_ops = &alpha_pci_ops;
+const struct dma_map_ops *dma_ops = &alpha_pci_ops;
 EXPORT_SYMBOL(dma_ops);
@@ -18,9 +18,9 @@
 #include <plat/dma.h>
 #endif
-extern struct dma_map_ops arc_dma_ops;
+extern const struct dma_map_ops arc_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &arc_dma_ops;
 }
...
@@ -218,7 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 	return dma_mask == DMA_BIT_MASK(32);
 }
-struct dma_map_ops arc_dma_ops = {
+const struct dma_map_ops arc_dma_ops = {
 	.alloc = arc_dma_alloc,
 	.free = arc_dma_free,
 	.mmap = arc_dma_mmap,
...
@@ -452,7 +452,7 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
 	return arm_dma_ops.set_dma_mask(dev, dma_mask);
 }
-static struct dma_map_ops dmabounce_ops = {
+static const struct dma_map_ops dmabounce_ops = {
 	.alloc = arm_dma_alloc,
 	.free = arm_dma_free,
 	.mmap = arm_dma_mmap,
...
@@ -7,7 +7,7 @@
 #define ASMARM_DEVICE_H
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
...
@@ -13,17 +13,17 @@
 #include <asm/xen/hypervisor.h>
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-extern struct dma_map_ops arm_dma_ops;
+extern const struct dma_map_ops arm_dma_ops;
-extern struct dma_map_ops arm_coherent_dma_ops;
+extern const struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	return &arm_dma_ops;
 }
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (xen_initial_domain())
 		return xen_dma_ops;
@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return __generic_dma_ops(dev);
 }
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
 {
 	BUG_ON(!dev);
 	dev->archdata.dma_ops = ops;
...
@@ -180,7 +180,7 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
-struct dma_map_ops arm_dma_ops = {
+const struct dma_map_ops arm_dma_ops = {
 	.alloc = arm_dma_alloc,
 	.free = arm_dma_free,
 	.mmap = arm_dma_mmap,
@@ -204,7 +204,7 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
-struct dma_map_ops arm_coherent_dma_ops = {
+const struct dma_map_ops arm_coherent_dma_ops = {
 	.alloc = arm_coherent_dma_alloc,
 	.free = arm_coherent_dma_free,
 	.mmap = arm_coherent_dma_mmap,
@@ -1067,7 +1067,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i, j;
@@ -1101,7 +1101,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
@@ -1120,7 +1120,7 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
@@ -1139,7 +1139,7 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
@@ -2099,7 +2099,7 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
-struct dma_map_ops iommu_ops = {
+const struct dma_map_ops iommu_ops = {
 	.alloc = arm_iommu_alloc_attrs,
 	.free = arm_iommu_free_attrs,
 	.mmap = arm_iommu_mmap_attrs,
@@ -2119,7 +2119,7 @@ struct dma_map_ops iommu_ops = {
 	.unmap_resource = arm_iommu_unmap_resource,
 };
-struct dma_map_ops iommu_coherent_ops = {
+const struct dma_map_ops iommu_coherent_ops = {
 	.alloc = arm_coherent_iommu_alloc_attrs,
 	.free = arm_coherent_iommu_free_attrs,
 	.mmap = arm_coherent_iommu_mmap_attrs,
@@ -2319,7 +2319,7 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
 {
 	return coherent ? &iommu_coherent_ops : &iommu_ops;
 }
@@ -2374,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
-static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
 	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
 }
@@ -2382,7 +2382,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 	dev->archdata.dma_coherent = coherent;
 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
...
@@ -182,10 +182,10 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
-struct dma_map_ops *xen_dma_ops;
+const struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);
-static struct dma_map_ops xen_swiotlb_dma_ops = {
+static const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
...
@@ -17,7 +17,7 @@
 #define __ASM_DEVICE_H
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_IOMMU_API
 	void *iommu; /* private IOMMU data */
 #endif
...
@@ -25,9 +25,9 @@
 #include <asm/xen/hypervisor.h>
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
-extern struct dma_map_ops dummy_dma_ops;
+extern const struct dma_map_ops dummy_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
@@ -39,7 +39,7 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 	return &dummy_dma_ops;
 }
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (xen_initial_domain())
 		return xen_dma_ops;
...
@@ -352,7 +352,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return 1;
 }
-static struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
@@ -505,7 +505,7 @@ static int __dummy_dma_supported(struct device *hwdev, u64 mask)
 	return 0;
 }
-struct dma_map_ops dummy_dma_ops = {
+const struct dma_map_ops dummy_dma_ops = {
 	.alloc = __dummy_alloc,
 	.free = __dummy_free,
 	.mmap = __dummy_mmap,
@@ -784,7 +784,7 @@ static void __iommu_unmap_sg_attrs(struct device *dev,
 	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
 }
-static struct dma_map_ops iommu_dma_ops = {
+static const struct dma_map_ops iommu_dma_ops = {
 	.alloc = __iommu_alloc_attrs,
 	.free = __iommu_free_attrs,
 	.mmap = __iommu_mmap_attrs,
...
@@ -4,9 +4,9 @@
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	int direction);
-extern struct dma_map_ops avr32_dma_ops;
+extern const struct dma_map_ops avr32_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &avr32_dma_ops;
 }
...
@@ -191,7 +191,7 @@ static void avr32_dma_sync_sg_for_device(struct device *dev,
 		dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
 }
-struct dma_map_ops avr32_dma_ops = {
+const struct dma_map_ops avr32_dma_ops = {
 	.alloc = avr32_dma_alloc,
 	.free = avr32_dma_free,
 	.map_page = avr32_dma_map_page,
...
@@ -36,9 +36,9 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
 		__dma_sync(addr, size, dir);
 }
-extern struct dma_map_ops bfin_dma_ops;
+extern const struct dma_map_ops bfin_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &bfin_dma_ops;
 }
...
@@ -159,7 +159,7 @@ static inline void bfin_dma_sync_single_for_device(struct device *dev,
 	_dma_sync(handle, size, dir);
 }
-struct dma_map_ops bfin_dma_ops = {
+const struct dma_map_ops bfin_dma_ops = {
 	.alloc = bfin_dma_alloc,
 	.free = bfin_dma_free,
...
@@ -17,9 +17,9 @@
  */
 #define DMA_ERROR_CODE ~0
-extern struct dma_map_ops c6x_dma_ops;
+extern const struct dma_map_ops c6x_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &c6x_dma_ops;
 }
...
@@ -123,7 +123,7 @@ static void c6x_dma_sync_sg_for_device(struct device *dev,
 }
-struct dma_map_ops c6x_dma_ops = {
+const struct dma_map_ops c6x_dma_ops = {
 	.alloc = c6x_dma_alloc,
 	.free = c6x_dma_free,
 	.map_page = c6x_dma_map_page,
...
@@ -69,7 +69,7 @@ static inline int v32_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-struct dma_map_ops v32_dma_ops = {
+const struct dma_map_ops v32_dma_ops = {
 	.alloc = v32_dma_alloc,
 	.free = v32_dma_free,
 	.map_page = v32_dma_map_page,
...
@@ -2,14 +2,14 @@
 #define _ASM_CRIS_DMA_MAPPING_H
 #ifdef CONFIG_PCI
-extern struct dma_map_ops v32_dma_ops;
+extern const struct dma_map_ops v32_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &v32_dma_ops;
 }
 #else
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	BUG();
 	return NULL;
...
@@ -7,9 +7,9 @@
 extern unsigned long __nongprelbss dma_coherent_mem_start;
 extern unsigned long __nongprelbss dma_coherent_mem_end;
-extern struct dma_map_ops frv_dma_ops;
+extern const struct dma_map_ops frv_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &frv_dma_ops;
 }
...
@@ -164,7 +164,7 @@ static int frv_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
 	.alloc = frv_dma_alloc,
 	.free = frv_dma_free,
 	.map_page = frv_dma_map_page,
...
@@ -106,7 +106,7 @@ static int frv_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
 	.alloc = frv_dma_alloc,
 	.free = frv_dma_free,
 	.map_page = frv_dma_map_page,
...
 #ifndef _H8300_DMA_MAPPING_H
 #define _H8300_DMA_MAPPING_H
-extern struct dma_map_ops h8300_dma_map_ops;
+extern const struct dma_map_ops h8300_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &h8300_dma_map_ops;
 }
...
@@ -60,7 +60,7 @@ static int map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
-struct dma_map_ops h8300_dma_map_ops = {
+const struct dma_map_ops h8300_dma_map_ops = {
 	.alloc = dma_alloc,
 	.free = dma_free,
 	.map_page = map_page,
...
@@ -32,9 +32,9 @@ struct device;
 extern int bad_dma_address;
 #define DMA_ERROR_CODE bad_dma_address
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (unlikely(dev == NULL))
 		return NULL;
...
@@ -25,7 +25,7 @@
 #include <linux/module.h>
 #include <asm/page.h>
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 int bad_dma_address; /* globals are automatically initialized to zero */
@@ -203,7 +203,7 @@ static void hexagon_sync_single_for_device(struct device *dev,
 	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
 }
-struct dma_map_ops hexagon_dma_ops = {
+const struct dma_map_ops hexagon_dma_ops = {
 	.alloc = hexagon_dma_alloc_coherent,
 	.free = hexagon_free_coherent,
 	.map_sg = hexagon_map_sg,
...
@@ -18,7 +18,7 @@
 #include <linux/export.h>
 #include <asm/machvec.h>
-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
@@ -34,7 +34,7 @@ static inline int use_swiotlb(struct device *dev)
 		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
 {
 	if (use_swiotlb(dev))
 		return &swiotlb_dma_ops;
...
@@ -2096,7 +2096,7 @@ static int __init acpi_sba_ioc_init_acpi(void)
 /* This has to run before acpi_scan_init(). */
 arch_initcall(acpi_sba_ioc_init_acpi);
-extern struct dma_map_ops swiotlb_dma_ops;
+extern const struct dma_map_ops swiotlb_dma_ops;
 static int __init
 sba_init(void)
@@ -2216,7 +2216,7 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
-struct dma_map_ops sba_dma_ops = {
+const struct dma_map_ops sba_dma_ops = {
 	.alloc = sba_alloc_coherent,
 	.free = sba_free_coherent,
 	.map_page = sba_map_page,
...
@@ -14,7 +14,7 @@
 #define DMA_ERROR_CODE 0
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
...
@@ -44,7 +44,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 /*
  * WARNING: The legacy I/O space is _architected_. Platforms are
@@ -248,7 +248,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # endif /* CONFIG_IA64_GENERIC */
 extern void swiotlb_dma_init(void);
-extern struct dma_map_ops *dma_get_ops(struct device *);
+extern const struct dma_map_ops *dma_get_ops(struct device *);
 /*
  * Define default versions so we can extend machvec for new platforms without having
...
@@ -4,7 +4,7 @@
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly;
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
@@ -17,7 +17,7 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
-struct dma_map_ops *dma_get_ops(struct device *dev)
+const struct dma_map_ops *dma_get_ops(struct device *dev)
 {
 	return dma_ops;
 }
...
@@ -90,11 +90,11 @@ void __init pci_iommu_alloc(void)
 {
 	dma_ops = &intel_dma_ops;
-	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+	intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
-	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+	intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
-	dma_ops->sync_single_for_device = machvec_dma_sync_single;
+	intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
-	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+	intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
-	dma_ops->dma_supported = iommu_dma_supported;
+	intel_dma_ops.dma_supported = iommu_dma_supported;
 	/*
 	 * The order of these functions is important for
...
@@ -30,7 +30,7 @@ static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
-struct dma_map_ops swiotlb_dma_ops = {
+const struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = ia64_swiotlb_alloc_coherent,
 	.free = ia64_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
...
@@ -4,7 +4,7 @@
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 };
 struct pdev_archdata {
...
@@ -10,7 +10,7 @@
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
...
 #ifndef _M68K_DMA_MAPPING_H
 #define _M68K_DMA_MAPPING_H
-extern struct dma_map_ops m68k_dma_ops;
+extern const struct dma_map_ops m68k_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &m68k_dma_ops;
 }
...
@@ -158,7 +158,7 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 	return nents;
 }
-struct dma_map_ops m68k_dma_ops = {
+const struct dma_map_ops m68k_dma_ops = {
 	.alloc = m68k_dma_alloc,
 	.free = m68k_dma_free,
 	.map_page = m68k_dma_map_page,
...
 #ifndef _ASM_METAG_DMA_MAPPING_H
 #define _ASM_METAG_DMA_MAPPING_H
-extern struct dma_map_ops metag_dma_ops;
+extern const struct dma_map_ops metag_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &metag_dma_ops;
 }
...
@@ -575,7 +575,7 @@ static void metag_dma_sync_sg_for_device(struct device *dev,
 		dma_sync_for_device(sg_virt(sg), sg->length, direction);
 }
-struct dma_map_ops metag_dma_ops = {
+const struct dma_map_ops metag_dma_ops = {
 	.alloc = metag_dma_alloc,
 	.free = metag_dma_free,
 	.map_page = metag_dma_map_page,
...
@@ -36,9 +36,9 @@
 /*
  * Available generic sets of operations
  */
-extern struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_direct_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &dma_direct_ops;
 }
...
@@ -187,7 +187,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 #endif
 }
-struct dma_map_ops dma_direct_ops = {
+const struct dma_map_ops dma_direct_ops = {
 	.alloc = dma_direct_alloc_coherent,
 	.free = dma_direct_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
...
@@ -205,7 +205,7 @@ static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr
 }
 struct octeon_dma_map_ops {
-	struct dma_map_ops dma_map_ops;
+	const struct dma_map_ops dma_map_ops;
 	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
 	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
 };
@@ -333,7 +333,7 @@ static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
 	},
 };
-struct dma_map_ops *octeon_pci_dma_map_ops;
+const struct dma_map_ops *octeon_pci_dma_map_ops;
 void __init octeon_pci_dma_init(void)
 {
...
@@ -10,7 +10,7 @@ struct dma_map_ops;
 struct dev_archdata {
 	/* DMA operations on that device */
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMA_PERDEV_COHERENT
 	/* Non-zero if DMA is coherent with CPU caches */
...
@@ -9,9 +9,9 @@
 #include <dma-coherence.h>
 #endif
-extern struct dma_map_ops *mips_dma_map_ops;
+extern const struct dma_map_ops *mips_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
...
@@ -65,7 +65,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 struct dma_map_ops;
-extern struct dma_map_ops *octeon_pci_dma_map_ops;
+extern const struct dma_map_ops *octeon_pci_dma_map_ops;
 extern char *octeon_swiotlb;
 #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
@@ -88,7 +88,7 @@ extern struct plat_smp_ops nlm_smp_ops;
 extern char nlm_reset_entry[], nlm_reset_entry_end[];
 /* SWIOTLB */
-extern struct dma_map_ops nlm_swiotlb_dma_ops;
+extern const struct dma_map_ops nlm_swiotlb_dma_ops;
 extern unsigned int nlm_threads_per_core;
 extern cpumask_t nlm_cpumask;
...
@@ -122,7 +122,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
-static struct dma_map_ops loongson_dma_map_ops = {
+static const struct dma_map_ops loongson_dma_map_ops = {
 	.alloc = loongson_dma_alloc_coherent,
 	.free = loongson_dma_free_coherent,
 	.map_page = loongson_dma_map_page,
...
@@ -417,7 +417,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 EXPORT_SYMBOL(dma_cache_sync);
-static struct dma_map_ops mips_default_dma_map_ops = {
+static const struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
 	.mmap = mips_dma_mmap,
@@ -433,7 +433,7 @@ static struct dma_map_ops mips_default_dma_map_ops = {
 	.dma_supported = mips_dma_supported
 };
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
 EXPORT_SYMBOL(mips_dma_map_ops);
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
...
@@ -67,7 +67,7 @@ static void nlm_dma_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
-struct dma_map_ops nlm_swiotlb_dma_ops = {
+const struct dma_map_ops nlm_swiotlb_dma_ops = {
 	.alloc = nlm_dma_alloc_coherent,
 	.free = nlm_dma_free_coherent,
 	.map_page = swiotlb_map_page,
...
@@ -14,9 +14,9 @@
 #include <asm/cache.h>
 #include <asm/io.h>
-extern struct dma_map_ops mn10300_dma_ops;
+extern const struct dma_map_ops mn10300_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &mn10300_dma_ops;
 }
...
@@ -121,7 +121,7 @@ static int mn10300_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-struct dma_map_ops mn10300_dma_ops = {
+const struct dma_map_ops mn10300_dma_ops = {
 	.alloc = mn10300_dma_alloc,
 	.free = mn10300_dma_free,
 	.map_page = mn10300_dma_map_page,
...
@@ -10,9 +10,9 @@
 #ifndef _ASM_NIOS2_DMA_MAPPING_H
 #define _ASM_NIOS2_DMA_MAPPING_H
-extern struct dma_map_ops nios2_dma_ops;
+extern const struct dma_map_ops nios2_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &nios2_dma_ops;
 }
...
@@ -192,7 +192,7 @@ static void nios2_dma_sync_sg_for_device(struct device *dev,
 }
-struct dma_map_ops nios2_dma_ops = {
+const struct dma_map_ops nios2_dma_ops = {
 	.alloc = nios2_dma_alloc,
 	.free = nios2_dma_free,
 	.map_page = nios2_dma_map_page,
...
@@ -28,9 +28,9 @@
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-extern struct dma_map_ops or1k_dma_map_ops;
+extern const struct dma_map_ops or1k_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &or1k_dma_map_ops;
 }
...
@@ -232,7 +232,7 @@ or1k_sync_single_for_device(struct device *dev,
 	mtspr(SPR_DCBFR, cl);
 }
-struct dma_map_ops or1k_dma_map_ops = {
+const struct dma_map_ops or1k_dma_map_ops = {
 	.alloc = or1k_dma_alloc,
 	.free = or1k_dma_free,
 	.map_page = or1k_map_page,
...
@@ -21,13 +21,13 @@
  */
 #ifdef CONFIG_PA11
-extern struct dma_map_ops pcxl_dma_ops;
+extern const struct dma_map_ops pcxl_dma_ops;
-extern struct dma_map_ops pcx_dma_ops;
+extern const struct dma_map_ops pcx_dma_ops;
 #endif
-extern struct dma_map_ops *hppa_dma_ops;
+extern const struct dma_map_ops *hppa_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return hppa_dma_ops;
 }
...
@@ -40,7 +40,7 @@
 #include <asm/parisc-device.h>
 /* See comments in include/asm-parisc/pci.h */
-struct dma_map_ops *hppa_dma_ops __read_mostly;
+const struct dma_map_ops *hppa_dma_ops __read_mostly;
 EXPORT_SYMBOL(hppa_dma_ops);
 static struct device root = {
...
@@ -572,7 +572,7 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
-struct dma_map_ops pcxl_dma_ops = {
+const struct dma_map_ops pcxl_dma_ops = {
 	.dma_supported = pa11_dma_supported,
 	.alloc = pa11_dma_alloc,
 	.free = pa11_dma_free,
@@ -608,7 +608,7 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
 	return;
 }
-struct dma_map_ops pcx_dma_ops = {
+const struct dma_map_ops pcx_dma_ops = {
 	.dma_supported = pa11_dma_supported,
 	.alloc = pcx_dma_alloc,
 	.free = pcx_dma_free,
...
@@ -21,7 +21,7 @@ struct iommu_table;
  */
 struct dev_archdata {
 	/* DMA operations on that device */
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 	/*
 	 * These two used to be a union. However, with the hybrid ops we need
...
@@ -76,9 +76,9 @@ static inline unsigned long device_to_mask(struct device *dev)
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
 #endif
-extern struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_direct_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	/* We don't handle the NULL dev case for ISA for now. We could
 	 * do it via an out of line call but it is not needed for now. The
@@ -91,7 +91,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dev->archdata.dma_ops;
 }
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
 {
 	dev->archdata.dma_ops = ops;
 }
...
@@ -53,8 +53,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
-extern struct dma_map_ops *get_pci_dma_ops(void);
+extern const struct dma_map_ops *get_pci_dma_ops(void);
 #else /* CONFIG_PCI */
 #define set_pci_dma_ops(d)
 #define get_pci_dma_ops() NULL
...
@@ -13,7 +13,7 @@
 #include <linux/swiotlb.h>
-extern struct dma_map_ops swiotlb_dma_ops;
+extern const struct dma_map_ops swiotlb_dma_ops;
 static inline void dma_mark_clean(void *addr, size_t size) {}
...
@@ -46,7 +46,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
  * map_page, and unmap_page on highmem, use normal dma_ops
  * for everything else.
  */
-struct dma_map_ops swiotlb_dma_ops = {
+const struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_direct_alloc_coherent,
 	.free = __dma_direct_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
...
@@ -274,7 +274,7 @@ static inline void dma_direct_sync_single(struct device *dev,
 }
 #endif
-struct dma_map_ops dma_direct_ops = {
+const struct dma_map_ops dma_direct_ops = {
 	.alloc = dma_direct_alloc_coherent,
 	.free = dma_direct_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
@@ -316,7 +316,7 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
 int __dma_set_mask(struct device *dev, u64 dma_mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
 		return dma_ops->set_dma_mask(dev, dma_mask);
@@ -344,7 +344,7 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 __dma_get_required_mask(struct device *dev)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 	if (unlikely(dma_ops == NULL))
 		return 0;
...
@@ -59,14 +59,14 @@ resource_size_t isa_mem_base;
 EXPORT_SYMBOL(isa_mem_base);
-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
 {
 	pci_dma_ops = dma_ops;
 }
-struct dma_map_ops *get_pci_dma_ops(void)
+const struct dma_map_ops *get_pci_dma_ops(void)
 {
 	return pci_dma_ops;
 }
...
@@ -651,7 +651,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
-static struct dma_map_ops dma_iommu_fixed_ops = {
+static const struct dma_map_ops dma_iommu_fixed_ops = {
 	.alloc = dma_fixed_alloc_coherent,
 	.free = dma_fixed_free_coherent,
 	.map_sg = dma_fixed_map_sg,
@@ -1172,7 +1172,7 @@ __setup("iommu_fixed=", setup_iommu_fixed);
 static u64 cell_dma_get_required_mask(struct device *dev)
 {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 	if (!dev->dma_mask)
 		return 0;
...
@@ -115,7 +115,7 @@ static u64 dma_npu_get_required_mask(struct device *dev)
 	return 0;
 }
-static struct dma_map_ops dma_npu_ops = {
+static const struct dma_map_ops dma_npu_ops = {
 	.map_page = dma_npu_map_page,
 	.map_sg = dma_npu_map_sg,
 	.alloc = dma_npu_alloc,
...
@@ -701,7 +701,7 @@ static u64 ps3_dma_get_required_mask(struct device *_dev)
 	return DMA_BIT_MASK(32);
 }
-static struct dma_map_ops ps3_sb_dma_ops = {
+static const struct dma_map_ops ps3_sb_dma_ops = {
 	.alloc = ps3_alloc_coherent,
 	.free = ps3_free_coherent,
 	.map_sg = ps3_sb_map_sg,
@@ -712,7 +712,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
 	.unmap_page = ps3_unmap_page,
 };
-static struct dma_map_ops ps3_ioc0_dma_ops = {
+static const struct dma_map_ops ps3_ioc0_dma_ops = {
 	.alloc = ps3_alloc_coherent,
 	.free = ps3_free_coherent,
 	.map_sg = ps3_ioc0_map_sg,
...
@@ -136,7 +136,7 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev)
 	return DMA_BIT_MASK(64);
 }
-static struct dma_map_ops ibmebus_dma_ops = {
+static const struct dma_map_ops ibmebus_dma_ops = {
 	.alloc = ibmebus_alloc_coherent,
 	.free = ibmebus_free_coherent,
 	.map_sg = ibmebus_map_sg,
...
@@ -615,7 +615,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
 	return dma_iommu_ops.get_required_mask(dev);
 }
-static struct dma_map_ops vio_dma_mapping_ops = {
+static const struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc = vio_dma_iommu_alloc_coherent,
 	.free = vio_dma_iommu_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
...
@@ -4,7 +4,7 @@
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 };
 struct pdev_archdata {
...
@@ -10,9 +10,9 @@
 #define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
-extern struct dma_map_ops s390_pci_dma_ops;
+extern const struct dma_map_ops s390_pci_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
...
@@ -650,7 +650,7 @@ static int __init dma_debug_do_init(void)
 }
 fs_initcall(dma_debug_do_init);
-struct dma_map_ops s390_pci_dma_ops = {
+const struct dma_map_ops s390_pci_dma_ops = {
 	.alloc = s390_dma_alloc,
 	.free = s390_dma_free,
 	.map_sg = s390_dma_map_sg,
...
 #ifndef __ASM_SH_DMA_MAPPING_H
 #define __ASM_SH_DMA_MAPPING_H
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
 extern void no_iommu_init(void);
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return dma_ops;
 }
...
@@ -65,7 +65,7 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
 }
 #endif
-struct dma_map_ops nommu_dma_ops = {
+const struct dma_map_ops nommu_dma_ops = {
 	.alloc = dma_generic_alloc_coherent,
 	.free = dma_generic_free_coherent,
 	.map_page = nommu_map_page,
...
@@ -22,7 +22,7 @@
 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 static int __init dma_init(void)
...
@@ -18,13 +18,13 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	 */
 }
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-extern struct dma_map_ops *leon_dma_ops;
+extern const struct dma_map_ops *leon_dma_ops;
-extern struct dma_map_ops pci32_dma_ops;
+extern const struct dma_map_ops pci32_dma_ops;
 extern struct bus_type pci_bus_type;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_SPARC_LEON
 	if (sparc_cpu_model == sparc_leon)
...
@@ -741,7 +741,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
-static struct dma_map_ops sun4u_dma_ops = {
+static const struct dma_map_ops sun4u_dma_ops = {
 	.alloc = dma_4u_alloc_coherent,
 	.free = dma_4u_free_coherent,
 	.map_page = dma_4u_map_page,
@@ -752,7 +752,7 @@ static struct dma_map_ops sun4u_dma_ops = {
 	.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
 };
-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 int dma_supported(struct device *dev, u64 device_mask)
...
...@@ -401,7 +401,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, ...@@ -401,7 +401,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG(); BUG();
} }
static struct dma_map_ops sbus_dma_ops = { static const struct dma_map_ops sbus_dma_ops = {
.alloc = sbus_alloc_coherent, .alloc = sbus_alloc_coherent,
.free = sbus_free_coherent, .free = sbus_free_coherent,
.map_page = sbus_map_page, .map_page = sbus_map_page,
...@@ -637,7 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * ...@@ -637,7 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
} }
} }
struct dma_map_ops pci32_dma_ops = { const struct dma_map_ops pci32_dma_ops = {
.alloc = pci32_alloc_coherent, .alloc = pci32_alloc_coherent,
.free = pci32_free_coherent, .free = pci32_free_coherent,
.map_page = pci32_map_page, .map_page = pci32_map_page,
...@@ -652,10 +652,10 @@ struct dma_map_ops pci32_dma_ops = { ...@@ -652,10 +652,10 @@ struct dma_map_ops pci32_dma_ops = {
EXPORT_SYMBOL(pci32_dma_ops); EXPORT_SYMBOL(pci32_dma_ops);
/* leon re-uses pci32_dma_ops */ /* leon re-uses pci32_dma_ops */
struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
EXPORT_SYMBOL(leon_dma_ops); EXPORT_SYMBOL(leon_dma_ops);
struct dma_map_ops *dma_ops = &sbus_dma_ops; const struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops); EXPORT_SYMBOL(dma_ops);
......
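
Worth noting in the ioport.c hunk: `const struct dma_map_ops *dma_ops` declares a mutable pointer to an immutable table, not a constant pointer. Retargeting the pointer at boot (the sbus vs. LEON selection here) therefore still works; only writes through it are rejected. A sketch under that reading, with hypothetical names:

/* Sketch; names are illustrative. A pointer-to-const can still be
 * retargeted; only the pointed-to table is immutable. */
struct demo_dma_ops { int dummy; };

static const struct demo_dma_ops sbus_like_ops  = { .dummy = 1 };
static const struct demo_dma_ops pci32_like_ops = { .dummy = 2 };

/* mutable pointer, immutable target */
static const struct demo_dma_ops *demo_dma_ops = &sbus_like_ops;

void demo_leon_init(void)
{
	demo_dma_ops = &pci32_like_ops;	/* retargeting the pointer: allowed */
	/* demo_dma_ops->dummy = 3;	   writing through it: rejected */
}
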
...@@ -669,7 +669,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, ...@@ -669,7 +669,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
local_irq_restore(flags); local_irq_restore(flags);
} }
static struct dma_map_ops sun4v_dma_ops = { static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent, .alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent, .free = dma_4v_free_coherent,
.map_page = dma_4v_map_page, .map_page = dma_4v_map_page,
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
struct dev_archdata { struct dev_archdata {
/* DMA operations on that device */ /* DMA operations on that device */
struct dma_map_ops *dma_ops; const struct dma_map_ops *dma_ops;
/* Offset of the DMA address from the PA. */ /* Offset of the DMA address from the PA. */
dma_addr_t dma_offset; dma_addr_t dma_offset;
......
...@@ -24,12 +24,12 @@ ...@@ -24,12 +24,12 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK #define ARCH_HAS_DMA_GET_REQUIRED_MASK
#endif #endif
extern struct dma_map_ops *tile_dma_map_ops; extern const struct dma_map_ops *tile_dma_map_ops;
extern struct dma_map_ops *gx_pci_dma_map_ops; extern const struct dma_map_ops *gx_pci_dma_map_ops;
extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops; extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{ {
if (dev && dev->archdata.dma_ops) if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops; return dev->archdata.dma_ops;
...@@ -59,7 +59,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) ...@@ -59,7 +59,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
static inline void dma_mark_clean(void *addr, size_t size) {} static inline void dma_mark_clean(void *addr, size_t size) {}
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{ {
dev->archdata.dma_ops = ops; dev->archdata.dma_ops = ops;
} }
......
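
tile keeps per-device ops in dev->archdata, so the setter and the field have to be constified in lockstep; otherwise the assignment inside set_dma_ops() would need a const-discarding cast. A minimal sketch of that pairing, with hypothetical names:

/* Sketch; names are illustrative. Setter and archdata field are
 * constified together, so the assignment stays cast-free. */
struct demo_dma_ops { int dummy; };

struct demo_archdata {
	const struct demo_dma_ops *dma_ops;	/* field: pointer-to-const */
};

struct demo_device {
	struct demo_archdata archdata;
};

static inline void demo_set_dma_ops(struct demo_device *dev,
				    const struct demo_dma_ops *ops)
{
	dev->archdata.dma_ops = ops;	/* const-correct: no cast needed */
}
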
...@@ -329,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask) ...@@ -329,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask)
return 1; return 1;
} }
static struct dma_map_ops tile_default_dma_map_ops = { static const struct dma_map_ops tile_default_dma_map_ops = {
.alloc = tile_dma_alloc_coherent, .alloc = tile_dma_alloc_coherent,
.free = tile_dma_free_coherent, .free = tile_dma_free_coherent,
.map_page = tile_dma_map_page, .map_page = tile_dma_map_page,
...@@ -344,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = { ...@@ -344,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = {
.dma_supported = tile_dma_supported .dma_supported = tile_dma_supported
}; };
struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
EXPORT_SYMBOL(tile_dma_map_ops); EXPORT_SYMBOL(tile_dma_map_ops);
/* Generic PCI DMA mapping functions */ /* Generic PCI DMA mapping functions */
...@@ -516,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask) ...@@ -516,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask)
return 1; return 1;
} }
static struct dma_map_ops tile_pci_default_dma_map_ops = { static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.alloc = tile_pci_dma_alloc_coherent, .alloc = tile_pci_dma_alloc_coherent,
.free = tile_pci_dma_free_coherent, .free = tile_pci_dma_free_coherent,
.map_page = tile_pci_dma_map_page, .map_page = tile_pci_dma_map_page,
...@@ -531,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = { ...@@ -531,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = {
.dma_supported = tile_pci_dma_supported .dma_supported = tile_pci_dma_supported
}; };
struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
EXPORT_SYMBOL(gx_pci_dma_map_ops); EXPORT_SYMBOL(gx_pci_dma_map_ops);
/* PCI DMA mapping functions for legacy PCI devices */ /* PCI DMA mapping functions for legacy PCI devices */
...@@ -552,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size, ...@@ -552,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
swiotlb_free_coherent(dev, size, vaddr, dma_addr); swiotlb_free_coherent(dev, size, vaddr, dma_addr);
} }
static struct dma_map_ops pci_swiotlb_dma_ops = { static const struct dma_map_ops pci_swiotlb_dma_ops = {
.alloc = tile_swiotlb_alloc_coherent, .alloc = tile_swiotlb_alloc_coherent,
.free = tile_swiotlb_free_coherent, .free = tile_swiotlb_free_coherent,
.map_page = swiotlb_map_page, .map_page = swiotlb_map_page,
...@@ -567,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = { ...@@ -567,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error, .mapping_error = swiotlb_dma_mapping_error,
}; };
static struct dma_map_ops pci_hybrid_dma_ops = { static const struct dma_map_ops pci_hybrid_dma_ops = {
.alloc = tile_swiotlb_alloc_coherent, .alloc = tile_swiotlb_alloc_coherent,
.free = tile_swiotlb_free_coherent, .free = tile_swiotlb_free_coherent,
.map_page = tile_pci_dma_map_page, .map_page = tile_pci_dma_map_page,
...@@ -582,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = { ...@@ -582,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = {
.dma_supported = tile_pci_dma_supported .dma_supported = tile_pci_dma_supported
}; };
struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops; const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else #else
struct dma_map_ops *gx_legacy_pci_dma_map_ops; const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops; const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
#endif #endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops); EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
int dma_set_mask(struct device *dev, u64 mask) int dma_set_mask(struct device *dev, u64 mask)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); const struct dma_map_ops *dma_ops = get_dma_ops(dev);
/* /*
* For PCI devices with 64-bit DMA addressing capability, promote * For PCI devices with 64-bit DMA addressing capability, promote
...@@ -623,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask); ...@@ -623,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask);
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask) int dma_set_coherent_mask(struct device *dev, u64 mask)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); const struct dma_map_ops *dma_ops = get_dma_ops(dev);
/* /*
* For PCI devices with 64-bit DMA addressing capability, promote * For PCI devices with 64-bit DMA addressing capability, promote
......
...@@ -21,9 +21,9 @@ ...@@ -21,9 +21,9 @@
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
extern struct dma_map_ops swiotlb_dma_map_ops; extern const struct dma_map_ops swiotlb_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{ {
return &swiotlb_dma_map_ops; return &swiotlb_dma_map_ops;
} }
......
...@@ -31,7 +31,7 @@ static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, ...@@ -31,7 +31,7 @@ static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
swiotlb_free_coherent(dev, size, vaddr, dma_addr); swiotlb_free_coherent(dev, size, vaddr, dma_addr);
} }
struct dma_map_ops swiotlb_dma_map_ops = { const struct dma_map_ops swiotlb_dma_map_ops = {
.alloc = unicore_swiotlb_alloc_coherent, .alloc = unicore_swiotlb_alloc_coherent,
.free = unicore_swiotlb_free_coherent, .free = unicore_swiotlb_free_coherent,
.map_sg = swiotlb_map_sg_attrs, .map_sg = swiotlb_map_sg_attrs,
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
struct dev_archdata { struct dev_archdata {
#ifdef CONFIG_X86_DEV_DMA_OPS #ifdef CONFIG_X86_DEV_DMA_OPS
struct dma_map_ops *dma_ops; const struct dma_map_ops *dma_ops;
#endif #endif
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */ void *iommu; /* hook for IOMMU specific extension */
...@@ -13,7 +13,7 @@ struct dev_archdata { ...@@ -13,7 +13,7 @@ struct dev_archdata {
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS) #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
struct dma_domain { struct dma_domain {
struct list_head node; struct list_head node;
struct dma_map_ops *dma_ops; const struct dma_map_ops *dma_ops;
int domain_nr; int domain_nr;
}; };
void add_dma_domain(struct dma_domain *domain); void add_dma_domain(struct dma_domain *domain);
......
...@@ -25,9 +25,9 @@ extern int iommu_merge; ...@@ -25,9 +25,9 @@ extern int iommu_merge;
extern struct device x86_dma_fallback_dev; extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow; extern int panic_on_overflow;
extern struct dma_map_ops *dma_ops; extern const struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{ {
#ifndef CONFIG_X86_DEV_DMA_OPS #ifndef CONFIG_X86_DEV_DMA_OPS
return dma_ops; return dma_ops;
......
#ifndef _ASM_X86_IOMMU_H #ifndef _ASM_X86_IOMMU_H
#define _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H
extern struct dma_map_ops nommu_dma_ops; extern const struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu; extern int force_iommu, no_iommu;
extern int iommu_detected; extern int iommu_detected;
extern int iommu_pass_through; extern int iommu_pass_through;
......
...@@ -695,7 +695,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info) ...@@ -695,7 +695,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
return -1; return -1;
} }
static struct dma_map_ops gart_dma_ops = { static const struct dma_map_ops gart_dma_ops = {
.map_sg = gart_map_sg, .map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg, .unmap_sg = gart_unmap_sg,
.map_page = gart_map_page, .map_page = gart_map_page,
......
...@@ -478,7 +478,7 @@ static void calgary_free_coherent(struct device *dev, size_t size, ...@@ -478,7 +478,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size)); free_pages((unsigned long)vaddr, get_order(size));
} }
static struct dma_map_ops calgary_dma_ops = { static const struct dma_map_ops calgary_dma_ops = {
.alloc = calgary_alloc_coherent, .alloc = calgary_alloc_coherent,
.free = calgary_free_coherent, .free = calgary_free_coherent,
.map_sg = calgary_map_sg, .map_sg = calgary_map_sg,
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
static int forbid_dac __read_mostly; static int forbid_dac __read_mostly;
struct dma_map_ops *dma_ops = &nommu_dma_ops; const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops); EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly; static int iommu_sac_force __read_mostly;
...@@ -214,7 +214,7 @@ early_param("iommu", iommu_setup); ...@@ -214,7 +214,7 @@ early_param("iommu", iommu_setup);
int dma_supported(struct device *dev, u64 mask) int dma_supported(struct device *dev, u64 mask)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) { if (mask > 0xffffffff && forbid_dac > 0) {
......
...@@ -88,7 +88,7 @@ static void nommu_sync_sg_for_device(struct device *dev, ...@@ -88,7 +88,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
flush_write_buffers(); flush_write_buffers();
} }
struct dma_map_ops nommu_dma_ops = { const struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent, .alloc = dma_generic_alloc_coherent,
.free = dma_generic_free_coherent, .free = dma_generic_free_coherent,
.map_sg = nommu_map_sg, .map_sg = nommu_map_sg,
......
...@@ -45,7 +45,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size, ...@@ -45,7 +45,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
} }
static struct dma_map_ops swiotlb_dma_ops = { static const struct dma_map_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error, .mapping_error = swiotlb_dma_mapping_error,
.alloc = x86_swiotlb_alloc_coherent, .alloc = x86_swiotlb_alloc_coherent,
.free = x86_swiotlb_free_coherent, .free = x86_swiotlb_free_coherent,
......
...@@ -179,7 +179,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, ...@@ -179,7 +179,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev,
} }
/* We have our own dma_ops: the same as swiotlb but from alloc (above) */ /* We have our own dma_ops: the same as swiotlb but from alloc (above) */
static struct dma_map_ops sta2x11_dma_ops = { static const struct dma_map_ops sta2x11_dma_ops = {
.alloc = sta2x11_swiotlb_alloc_coherent, .alloc = sta2x11_swiotlb_alloc_coherent,
.free = x86_swiotlb_free_coherent, .free = x86_swiotlb_free_coherent,
.map_page = swiotlb_map_page, .map_page = swiotlb_map_page,
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
int xen_swiotlb __read_mostly; int xen_swiotlb __read_mostly;
static struct dma_map_ops xen_swiotlb_dma_ops = { static const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc = xen_swiotlb_alloc_coherent, .alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent, .free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
......
...@@ -10,7 +10,7 @@ struct dma_map_ops; ...@@ -10,7 +10,7 @@ struct dma_map_ops;
struct dev_archdata { struct dev_archdata {
/* DMA operations on that device */ /* DMA operations on that device */
struct dma_map_ops *dma_ops; const struct dma_map_ops *dma_ops;
}; };
struct pdev_archdata { struct pdev_archdata {
......
...@@ -18,9 +18,9 @@ ...@@ -18,9 +18,9 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0x0) #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern struct dma_map_ops xtensa_dma_map_ops; extern const struct dma_map_ops xtensa_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{ {
if (dev && dev->archdata.dma_ops) if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops; return dev->archdata.dma_ops;
......
...@@ -249,7 +249,7 @@ int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) ...@@ -249,7 +249,7 @@ int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0; return 0;
} }
struct dma_map_ops xtensa_dma_map_ops = { const struct dma_map_ops xtensa_dma_map_ops = {
.alloc = xtensa_dma_alloc, .alloc = xtensa_dma_alloc,
.free = xtensa_dma_free, .free = xtensa_dma_free,
.map_page = xtensa_map_page, .map_page = xtensa_map_page,
......
...@@ -117,7 +117,7 @@ static const struct iommu_ops amd_iommu_ops; ...@@ -117,7 +117,7 @@ static const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier); static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1; int amd_iommu_max_glx_val = -1;
static struct dma_map_ops amd_iommu_dma_ops; static const struct dma_map_ops amd_iommu_dma_ops;
/* /*
* This struct contains device specific data for the IOMMU * This struct contains device specific data for the IOMMU
...@@ -2728,7 +2728,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) ...@@ -2728,7 +2728,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
return check_device(dev); return check_device(dev);
} }
static struct dma_map_ops amd_iommu_dma_ops = { static const struct dma_map_ops amd_iommu_dma_ops = {
.alloc = alloc_coherent, .alloc = alloc_coherent,
.free = free_coherent, .free = free_coherent,
.map_page = map_page, .map_page = map_page,
......
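
The amd_iommu hunk relies on a C idiom worth calling out: a `static const` object may be tentatively declared before its initializer appears, so earlier code in the file can take its address while the full table, which references functions defined further down, stays at the bottom. A sketch of that idiom with hypothetical names:

/* Sketch of the forward-declaration idiom; names are illustrative. */
struct demo_dma_ops { int (*map)(void); };

static const struct demo_dma_ops demo_iommu_ops;	/* tentative */

const struct demo_dma_ops *demo_pick_ops(void)
{
	return &demo_iommu_ops;		/* address already usable here */
}

static int demo_iommu_map(void) { return 0; }

static const struct demo_dma_ops demo_iommu_ops = {	/* real definition */
	.map = demo_iommu_map,
};
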
...@@ -143,7 +143,7 @@ static void mbus_release_dev(struct device *d) ...@@ -143,7 +143,7 @@ static void mbus_release_dev(struct device *d)
} }
struct mbus_device * struct mbus_device *
mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct mbus_hw_ops *hw_ops, int index, struct mbus_hw_ops *hw_ops, int index,
void __iomem *mmio_va) void __iomem *mmio_va)
{ {
......
...@@ -138,7 +138,7 @@ static void scif_release_dev(struct device *d) ...@@ -138,7 +138,7 @@ static void scif_release_dev(struct device *d)
} }
struct scif_hw_dev * struct scif_hw_dev *
scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
struct mic_mw *mmio, struct mic_mw *aper, void *dp, struct mic_mw *mmio, struct mic_mw *aper, void *dp,
void __iomem *rdp, struct dma_chan **chan, int num_chan, void __iomem *rdp, struct dma_chan **chan, int num_chan,
......
...@@ -113,7 +113,7 @@ int scif_register_driver(struct scif_driver *driver); ...@@ -113,7 +113,7 @@ int scif_register_driver(struct scif_driver *driver);
void scif_unregister_driver(struct scif_driver *driver); void scif_unregister_driver(struct scif_driver *driver);
struct scif_hw_dev * struct scif_hw_dev *
scif_register_device(struct device *pdev, int id, scif_register_device(struct device *pdev, int id,
struct dma_map_ops *dma_ops, const struct dma_map_ops *dma_ops,
struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
struct mic_mw *mmio, struct mic_mw *aper, struct mic_mw *mmio, struct mic_mw *aper,
void *dp, void __iomem *rdp, void *dp, void __iomem *rdp,
......
...@@ -154,7 +154,7 @@ vop_register_device(struct device *pdev, int id, ...@@ -154,7 +154,7 @@ vop_register_device(struct device *pdev, int id,
vdev->dev.parent = pdev; vdev->dev.parent = pdev;
vdev->id.device = id; vdev->id.device = id;
vdev->id.vendor = VOP_DEV_ANY_ID; vdev->id.vendor = VOP_DEV_ANY_ID;
vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops; vdev->dev.archdata.dma_ops = dma_ops;
vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask; vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
dma_set_mask(&vdev->dev, DMA_BIT_MASK(64)); dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
vdev->dev.release = vop_release_dev; vdev->dev.release = vop_release_dev;
......
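
The vop_bus.c hunk is the one spot where the conversion deletes code rather than adding a qualifier: the `(struct dma_map_ops *)` cast existed only to discard const, and once archdata.dma_ops is itself a pointer-to-const the plain assignment is correct and the compiler regains the ability to flag real const violations. A sketch, with hypothetical names:

/* Sketch; names are illustrative. With a pointer-to-const destination
 * field, the const-discarding cast becomes unnecessary. */
struct demo_dma_ops { int dummy; };

struct demo_dev {
	const struct demo_dma_ops *dma_ops;
};

void demo_register(struct demo_dev *vdev, const struct demo_dma_ops *ops)
{
	/* before: vdev->dma_ops = (struct demo_dma_ops *)ops; */
	vdev->dma_ops = ops;	/* after: const-correct, no cast */
}
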
...@@ -245,7 +245,7 @@ static void __mic_dma_unmap_sg(struct device *dev, ...@@ -245,7 +245,7 @@ static void __mic_dma_unmap_sg(struct device *dev,
dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir); dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
} }
static struct dma_map_ops __mic_dma_ops = { static const struct dma_map_ops __mic_dma_ops = {
.alloc = __mic_dma_alloc, .alloc = __mic_dma_alloc,
.free = __mic_dma_free, .free = __mic_dma_free,
.map_page = __mic_dma_map_page, .map_page = __mic_dma_map_page,
...@@ -344,7 +344,7 @@ mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, ...@@ -344,7 +344,7 @@ mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
mic_unmap_single(mdev, dma_addr, size); mic_unmap_single(mdev, dma_addr, size);
} }
static struct dma_map_ops mic_dma_ops = { static const struct dma_map_ops mic_dma_ops = {
.map_page = mic_dma_map_page, .map_page = mic_dma_map_page,
.unmap_page = mic_dma_unmap_page, .unmap_page = mic_dma_unmap_page,
}; };
......
...@@ -1011,7 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ...@@ -1011,7 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
} }
static struct dma_map_ops ccio_ops = { static const struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported, .dma_supported = ccio_dma_supported,
.alloc = ccio_alloc, .alloc = ccio_alloc,
.free = ccio_free, .free = ccio_free,
......
...@@ -1069,7 +1069,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ...@@ -1069,7 +1069,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
} }
static struct dma_map_ops sba_ops = { static const struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported, .dma_supported = sba_dma_supported,
.alloc = sba_alloc, .alloc = sba_alloc,
.free = sba_free, .free = sba_free,
......
...@@ -282,7 +282,7 @@ static struct device *to_vmd_dev(struct device *dev) ...@@ -282,7 +282,7 @@ static struct device *to_vmd_dev(struct device *dev)
return &vmd->dev->dev; return &vmd->dev->dev;
} }
static struct dma_map_ops *vmd_dma_ops(struct device *dev) static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
{ {
return get_dma_ops(to_vmd_dev(dev)); return get_dma_ops(to_vmd_dev(dev));
} }
......
...@@ -127,7 +127,7 @@ struct dma_map_ops { ...@@ -127,7 +127,7 @@ struct dma_map_ops {
int is_phys; int is_phys;
}; };
extern struct dma_map_ops dma_noop_ops; extern const struct dma_map_ops dma_noop_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
...@@ -170,8 +170,8 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, ...@@ -170,8 +170,8 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
* dma dependent code. Code that depends on the dma-mapping * dma dependent code. Code that depends on the dma-mapping
* API needs to set 'depends on HAS_DMA' in its Kconfig * API needs to set 'depends on HAS_DMA' in its Kconfig
*/ */
extern struct dma_map_ops bad_dma_ops; extern const struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{ {
return &bad_dma_ops; return &bad_dma_ops;
} }
...@@ -182,7 +182,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, ...@@ -182,7 +182,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
enum dma_data_direction dir, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr; dma_addr_t addr;
kmemcheck_mark_initialized(ptr, size); kmemcheck_mark_initialized(ptr, size);
...@@ -201,7 +201,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, ...@@ -201,7 +201,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
enum dma_data_direction dir, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page) if (ops->unmap_page)
...@@ -217,7 +217,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, ...@@ -217,7 +217,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, int nents, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents; int i, ents;
struct scatterlist *s; struct scatterlist *s;
...@@ -235,7 +235,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg ...@@ -235,7 +235,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
int nents, enum dma_data_direction dir, int nents, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir); debug_dma_unmap_sg(dev, sg, nents, dir);
...@@ -249,7 +249,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, ...@@ -249,7 +249,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
enum dma_data_direction dir, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr; dma_addr_t addr;
kmemcheck_mark_initialized(page_address(page) + offset, size); kmemcheck_mark_initialized(page_address(page) + offset, size);
...@@ -265,7 +265,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, ...@@ -265,7 +265,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
enum dma_data_direction dir, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page) if (ops->unmap_page)
...@@ -279,7 +279,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev, ...@@ -279,7 +279,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
enum dma_data_direction dir, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr; dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
...@@ -300,7 +300,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, ...@@ -300,7 +300,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, size_t size, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_resource) if (ops->unmap_resource)
...@@ -312,7 +312,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, ...@@ -312,7 +312,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu) if (ops->sync_single_for_cpu)
...@@ -324,7 +324,7 @@ static inline void dma_sync_single_for_device(struct device *dev, ...@@ -324,7 +324,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, dma_addr_t addr, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device) if (ops->sync_single_for_device)
...@@ -364,7 +364,7 @@ static inline void ...@@ -364,7 +364,7 @@ static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir) int nelems, enum dma_data_direction dir)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_cpu) if (ops->sync_sg_for_cpu)
...@@ -376,7 +376,7 @@ static inline void ...@@ -376,7 +376,7 @@ static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir) int nelems, enum dma_data_direction dir)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir)); BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_device) if (ops->sync_sg_for_device)
...@@ -421,7 +421,7 @@ static inline int ...@@ -421,7 +421,7 @@ static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs) dma_addr_t dma_addr, size_t size, unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops); BUG_ON(!ops);
if (ops->mmap) if (ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
...@@ -439,7 +439,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, ...@@ -439,7 +439,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size, dma_addr_t dma_addr, size_t size,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops); BUG_ON(!ops);
if (ops->get_sgtable) if (ops->get_sgtable)
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
...@@ -457,7 +457,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, ...@@ -457,7 +457,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag, dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr; void *cpu_addr;
BUG_ON(!ops); BUG_ON(!ops);
...@@ -479,7 +479,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size, ...@@ -479,7 +479,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle, void *cpu_addr, dma_addr_t dma_handle,
unsigned long attrs) unsigned long attrs)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops); BUG_ON(!ops);
WARN_ON(irqs_disabled()); WARN_ON(irqs_disabled());
...@@ -537,7 +537,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) ...@@ -537,7 +537,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#ifndef HAVE_ARCH_DMA_SUPPORTED #ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask) static inline int dma_supported(struct device *dev, u64 mask)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
if (!ops) if (!ops)
return 0; return 0;
...@@ -550,7 +550,7 @@ static inline int dma_supported(struct device *dev, u64 mask) ...@@ -550,7 +550,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
#ifndef HAVE_ARCH_DMA_SET_MASK #ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask) static inline int dma_set_mask(struct device *dev, u64 mask)
{ {
struct dma_map_ops *ops = get_dma_ops(dev); const struct dma_map_ops *ops = get_dma_ops(dev);
if (ops->set_dma_mask) if (ops->set_dma_mask)
return ops->set_dma_mask(dev, mask); return ops->set_dma_mask(dev, mask);
......
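
All of the dma-mapping.h wrappers above share one shape: fetch the const ops table, test the optional hook, then call through it or fall back. Const-qualifying the local is free at the call site, since calling through a function pointer never writes to the table. A hedged sketch of that shape, with hypothetical names:

/* Sketch of the wrapper shape; names are illustrative. */
#include <stddef.h>

struct demo_device;

struct demo_dma_ops {
	int (*set_mask)(struct demo_device *dev, unsigned long long mask);
};

static const struct demo_dma_ops demo_default_ops = { .set_mask = NULL };

static inline const struct demo_dma_ops *demo_get_ops(struct demo_device *dev)
{
	(void)dev;
	return &demo_default_ops;
}

static inline int demo_set_mask(struct demo_device *dev,
				unsigned long long mask)
{
	const struct demo_dma_ops *ops = demo_get_ops(dev);

	if (ops->set_mask)			/* optional hook */
		return ops->set_mask(dev, mask);
	return 0;				/* generic fallback */
}
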
...@@ -90,7 +90,7 @@ struct mbus_hw_ops { ...@@ -90,7 +90,7 @@ struct mbus_hw_ops {
}; };
struct mbus_device * struct mbus_device *
mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct mbus_hw_ops *hw_ops, int index, struct mbus_hw_ops *hw_ops, int index,
void __iomem *mmio_va); void __iomem *mmio_va);
void mbus_unregister_device(struct mbus_device *mbdev); void mbus_unregister_device(struct mbus_device *mbdev);
......
...@@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) ...@@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE; return PARAVIRT_LAZY_NONE;
} }
extern struct dma_map_ops *xen_dma_ops; extern const struct dma_map_ops *xen_dma_ops;
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
void __init xen_early_init(void); void __init xen_early_init(void);
......
...@@ -64,7 +64,7 @@ static int dma_noop_supported(struct device *dev, u64 mask) ...@@ -64,7 +64,7 @@ static int dma_noop_supported(struct device *dev, u64 mask)
return 1; return 1;
} }
struct dma_map_ops dma_noop_ops = { const struct dma_map_ops dma_noop_ops = {
.alloc = dma_noop_alloc, .alloc = dma_noop_alloc,
.free = dma_noop_free, .free = dma_noop_free,
.map_page = dma_noop_map_page, .map_page = dma_noop_map_page,
......