Commit 53c92d79 authored by Robin Murphy, committed by Joerg Roedel

iommu: of: enforce const-ness of struct iommu_ops

As a set of driver-provided callbacks and static data, there is no
compelling reason for struct iommu_ops to be mutable in core code, so
enforce const-ness throughout.
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 3c3e8943
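
For context, the driver-side pattern this change enables looks roughly like the sketch below. The "foo" driver, its stub callbacks and the compatible string are hypothetical placeholders; only the const-qualified struct iommu_ops table and the of_iommu_set_ops() registration correspond to interfaces visible in this diff (IOMMU_OF_DECLARE() is the existing wiring behind the __iommu_of_table reference further down).

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/sizes.h>

/* Hypothetical stub callbacks; a real driver does the actual work here. */
static struct iommu_domain *foo_domain_alloc(unsigned type)
{
        return NULL;
}

static int foo_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        return -ENODEV;
}

/* Static, driver-provided data: with this patch it can live in .rodata. */
static const struct iommu_ops foo_iommu_ops = {
        .domain_alloc   = foo_domain_alloc,
        .attach_dev     = foo_attach_dev,
        .pgsize_bitmap  = SZ_4K,
};

static int __init foo_iommu_of_init(struct device_node *np)
{
        /* of_iommu_set_ops() now accepts a const pointer directly. */
        of_iommu_set_ops(np, &foo_iommu_ops);
        return 0;
}
IOMMU_OF_DECLARE(foo_iommu, "vendor,foo-iommu", foo_iommu_of_init);
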
@@ -118,7 +118,7 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 #define arch_setup_dma_ops arch_setup_dma_ops
 extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                               struct iommu_ops *iommu, bool coherent);
+                               const struct iommu_ops *iommu, bool coherent);
 #define arch_teardown_dma_ops arch_teardown_dma_ops
 extern void arch_teardown_dma_ops(struct device *dev);
@@ -2215,7 +2215,7 @@ static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
 }
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                    struct iommu_ops *iommu)
+                                    const struct iommu_ops *iommu)
 {
        struct dma_iommu_mapping *mapping;
@@ -2253,7 +2253,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 #else
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                    struct iommu_ops *iommu)
+                                    const struct iommu_ops *iommu)
 {
        return false;
 }
@@ -2270,7 +2270,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 }
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                        struct iommu_ops *iommu, bool coherent)
+                        const struct iommu_ops *iommu, bool coherent)
 {
        struct dma_map_ops *dma_ops;
@@ -48,7 +48,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 }
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                        struct iommu_ops *iommu, bool coherent);
+                        const struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops arch_setup_dma_ops
 #ifdef CONFIG_IOMMU_DMA
@@ -979,13 +979,13 @@ void arch_teardown_dma_ops(struct device *dev)
 #else
 static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                  struct iommu_ops *iommu)
+                                  const struct iommu_ops *iommu)
 { }
 #endif /* CONFIG_IOMMU_DMA */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                        struct iommu_ops *iommu, bool coherent)
+                        const struct iommu_ops *iommu, bool coherent)
 {
        if (!dev->archdata.dma_ops)
                dev->archdata.dma_ops = &swiotlb_dma_ops;
@@ -98,12 +98,12 @@ EXPORT_SYMBOL_GPL(of_get_dma_window);
 struct of_iommu_node {
        struct list_head list;
        struct device_node *np;
-       struct iommu_ops *ops;
+       const struct iommu_ops *ops;
 };
 static LIST_HEAD(of_iommu_list);
 static DEFINE_SPINLOCK(of_iommu_lock);
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
 {
        struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
@@ -119,10 +119,10 @@ void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
        spin_unlock(&of_iommu_lock);
 }
-struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
 {
        struct of_iommu_node *node;
-       struct iommu_ops *ops = NULL;
+       const struct iommu_ops *ops = NULL;
        spin_lock(&of_iommu_lock);
        list_for_each_entry(node, &of_iommu_list, list)
@@ -134,12 +134,12 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
        return ops;
 }
-struct iommu_ops *of_iommu_configure(struct device *dev,
+const struct iommu_ops *of_iommu_configure(struct device *dev,
                                     struct device_node *master_np)
 {
        struct of_phandle_args iommu_spec;
        struct device_node *np;
-       struct iommu_ops *ops = NULL;
+       const struct iommu_ops *ops = NULL;
        int idx = 0;
        /*
@@ -88,7 +88,7 @@ void of_dma_configure(struct device *dev, struct device_node *np)
        int ret;
        bool coherent;
        unsigned long offset;
-       struct iommu_ops *iommu;
+       const struct iommu_ops *iommu;
        /*
         * Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -514,7 +514,7 @@ extern u64 dma_get_required_mask(struct device *dev);
 #ifndef arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-                                      u64 size, struct iommu_ops *iommu,
+                                      u64 size, const struct iommu_ops *iommu,
                                       bool coherent) { }
 #endif
@@ -12,7 +12,7 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
                             size_t *size);
 extern void of_iommu_init(void);
-extern struct iommu_ops *of_iommu_configure(struct device *dev,
+extern const struct iommu_ops *of_iommu_configure(struct device *dev,
                                        struct device_node *master_np);
 #else
@@ -25,7 +25,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
 }
 static inline void of_iommu_init(void) { }
-static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
                                        struct device_node *master_np)
 {
        return NULL;
@@ -33,8 +33,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev,
 #endif /* CONFIG_OF_IOMMU */
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
-struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
 extern struct of_device_id __iommu_of_table;
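
On the consumer side, the pointer handed back by of_iommu_configure() or of_iommu_get_ops() is now read-only all the way into arch_setup_dma_ops(). A rough sketch, with a hypothetical helper standing in for the real callers shown in the hunks above (of_dma_configure() and the arch code):

#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

static void foo_bus_configure_dma(struct device *dev, struct device_node *np)
{
        const struct iommu_ops *iommu;

        /* May return NULL when the device sits behind no IOMMU. */
        iommu = of_iommu_configure(dev, np);

        /*
         * iommu->map = my_map_hook;
         * would now be rejected at compile time: *iommu is const.
         */

        arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32) + 1, iommu, true);
}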