Commit 40f1ce7f authored by Anton Blanchard, committed by Benjamin Herrenschmidt

powerpc: Remove ioremap_flags

We have a confusing number of ioremap functions. Make things just a
bit simpler by merging ioremap_flags and ioremap_prot.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent be135f40
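
For callers the change is a straight rename: ioremap_prot() takes the same physical address, size and page-flag arguments that ioremap_flags() did. A minimal sketch of the new call, assuming a typical powerpc driver; the base address, size and helper name below are made up for illustration and are not part of this commit:

#include <linux/io.h>		/* ioremap_prot(), iounmap() */
#include <asm/pgtable.h>	/* _PAGE_NO_CACHE */

/* Hypothetical register window; 0xf0000000/0x1000 are illustrative only. */
static void __iomem *example_map_regs(void)
{
	/* was: ioremap_flags(0xf0000000, 0x1000, _PAGE_NO_CACHE); */
	return ioremap_prot(0xf0000000, 0x1000, _PAGE_NO_CACHE);
}

Unmapping is unchanged: pass the returned cookie to iounmap() when the mapping is no longer needed.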
@@ -624,9 +624,8 @@ static inline void iosync(void)
  * * ioremap is the standard one and provides non-cacheable guarded mappings
  *   and can be hooked by the platform via ppc_md
  *
- * * ioremap_flags allows to specify the page flags as an argument and can
- *   also be hooked by the platform via ppc_md. ioremap_prot is the exact
- *   same thing as ioremap_flags.
+ * * ioremap_prot allows to specify the page flags as an argument and can
+ *   also be hooked by the platform via ppc_md.
  *
  * * ioremap_nocache is identical to ioremap
  *
@@ -639,7 +638,7 @@ static inline void iosync(void)
  *   currently be hooked. Must be page aligned.
  *
  * * __ioremap is the low level implementation used by ioremap and
- *   ioremap_flags and cannot be hooked (but can be used by a hook on one
+ *   ioremap_prot and cannot be hooked (but can be used by a hook on one
  *   of the previous ones)
  *
  * * __ioremap_caller is the same as above but takes an explicit caller
@@ -650,11 +649,10 @@ static inline void iosync(void)
  *
  */
 extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
-				   unsigned long flags);
+extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
+				  unsigned long flags);
 extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
 #define ioremap_nocache(addr, size)	ioremap((addr), (size))
-#define ioremap_prot(addr, size, prot)	ioremap_flags((addr), (size), (prot))
 extern void iounmap(volatile void __iomem *addr);
...
@@ -9,11 +9,11 @@
 #include <linux/device.h>	/* devres_*(), devm_ioremap_release() */
 #include <linux/gfp.h>
-#include <linux/io.h>		/* ioremap_flags() */
+#include <linux/io.h>		/* ioremap_prot() */
 #include <linux/module.h>	/* EXPORT_SYMBOL() */
 
 /**
- * devm_ioremap_prot - Managed ioremap_flags()
+ * devm_ioremap_prot - Managed ioremap_prot()
  * @dev: Generic device to remap IO address for
  * @offset: BUS offset to map
  * @size: Size of map
@@ -31,7 +31,7 @@ void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
 	if (!ptr)
 		return NULL;
 
-	addr = ioremap_flags(offset, size, flags);
+	addr = ioremap_prot(offset, size, flags);
 	if (addr) {
 		*ptr = addr;
 		devres_add(dev, ptr);
...
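
The managed wrapper keeps the same argument order as the underlying ioremap_prot(). A hypothetical probe-time sketch, assuming the usual devm_ioremap_prot() declaration in <linux/io.h>; the helper name and error handling are illustrative, not part of this commit:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>		/* devm_ioremap_prot() */
#include <asm/pgtable.h>	/* _PAGE_NO_CACHE */

/* Map an uncached register window whose lifetime is tied to the device;
 * devres tears the mapping down automatically when the device is unbound. */
static int example_probe_map(struct device *dev, resource_size_t start,
			     size_t len, void __iomem **regs)
{
	*regs = devm_ioremap_prot(dev, start, len, _PAGE_NO_CACHE);
	if (!*regs)
		return -ENOMEM;
	return 0;
}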
@@ -141,7 +141,7 @@ ioremap_wc(phys_addr_t addr, unsigned long size)
 EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *
-ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
+ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
 	/* writeable implies dirty for kernel addresses */
 	if (flags & _PAGE_RW)
@@ -160,7 +160,7 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
 	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_prot);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
...
@@ -265,7 +265,7 @@ void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
 	return __ioremap_caller(addr, size, flags, caller);
 }
 
-void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
-			     unsigned long flags)
+void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
+			    unsigned long flags)
 {
 	void *caller = __builtin_return_address(0);
@@ -322,7 +322,7 @@ void iounmap(volatile void __iomem *token)
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_wc);
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_prot);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(__ioremap_at);
 EXPORT_SYMBOL(iounmap);
...
@@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu)
  * The current HV requires the spu shadow regs to be mapped with the
  * PTE page protection bits set as read-only (PP=3). This implementation
  * uses the low level __ioremap() to bypass the page protection settings
- * inforced by ioremap_flags() to get the needed PTE bits set for the
+ * inforced by ioremap_prot() to get the needed PTE bits set for the
  * shadow regs.
  */
@@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu)
 		goto fail_ioremap;
 	}
 
-	spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
+	spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
 		LS_SIZE, _PAGE_NO_CACHE);
 	if (!spu->local_store) {
...
@@ -216,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device)
 			AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);
 
 	bank->ph_addr = resource.start;
-	bank->io_addr = (unsigned long) ioremap_flags(
+	bank->io_addr = (unsigned long) ioremap_prot(
 			bank->ph_addr, bank->size, _PAGE_NO_CACHE);
 	if (bank->io_addr == 0) {
 		dev_err(&device->dev, "ioremap() failed\n");
...
@@ -106,10 +106,10 @@ int __init instantiate_cache_sram(struct platform_device *dev,
 		goto out_free;
 	}
 
-	cache_sram->base_virt = ioremap_flags(cache_sram->base_phys,
+	cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
 				cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
 	if (!cache_sram->base_virt) {
-		dev_err(&dev->dev, "%s: ioremap_flags failed\n",
+		dev_err(&dev->dev, "%s: ioremap_prot failed\n",
 			dev->dev.of_node->full_name);
 		ret = -ENOMEM;
 		goto out_release;
...