Commit 5ace77e0 authored by Christoph Hellwig

nios2: remove __ioremap

The cacheflag argument to __ioremap is always 0, so just implement
ioremap directly.
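
Driver-facing code is unaffected by this cleanup: ioremap() keeps its signature, and only the out-of-line __ioremap() helper with its always-zero cacheflag argument goes away. A minimal usage sketch follows; the peripheral base address, size, and register offset are hypothetical, for illustration only:

	void __iomem *regs;

	/*
	 * Map a device's MMIO window; on nios2 this is now a direct
	 * call instead of an inline wrapper around __ioremap(..., 0).
	 */
	regs = ioremap(0x18001600UL, 0x40UL);	/* hypothetical base/size */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* hypothetical register write */
	iounmap(regs);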
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent dda85fba
--- a/arch/nios2/include/asm/io.h
+++ b/arch/nios2/include/asm/io.h
@@ -25,29 +25,17 @@
 #define writew_relaxed(x, addr)	writew(x, addr)
 #define writel_relaxed(x, addr)	writel(x, addr)
 
-extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
-			unsigned long cacheflag);
+void __iomem *ioremap(unsigned long physaddr, unsigned long size);
 extern void __iounmap(void __iomem *addr);
 
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-					unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
 static inline void iounmap(void __iomem *addr)
 {
 	__iounmap(addr);
 }
 
-#define ioremap_nocache ioremap_nocache
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+#define ioremap_nocache ioremap
+#define ioremap_wc ioremap
+#define ioremap_wt ioremap
 
 /* Pages to physical address... */
 #define page_to_phys(page)	virt_to_phys(page_to_virt(page))
--- a/arch/nios2/mm/ioremap.c
+++ b/arch/nios2/mm/ioremap.c
@@ -112,8 +112,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 /*
  * Map some physical address range into the kernel address space.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			unsigned long cacheflag)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long offset;
@@ -139,15 +138,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	}
 
-	/*
-	 * Map uncached objects in the low part of address space to
-	 * CONFIG_NIOS2_IO_REGION_BASE
-	 */
-	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
-	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
-	    !(cacheflag & _PAGE_CACHED))
-		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
-
 	/* Mappings have to be page-aligned */
 	offset = phys_addr & ~PAGE_MASK;
 	phys_addr &= PAGE_MASK;
@@ -158,14 +148,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (!area)
 		return NULL;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size,
-			cacheflag)) {
+	if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
 		vunmap(addr);
 		return NULL;
 	}
 	return (void __iomem *) (offset + (char *)addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);
 
 /*
  * __iounmap unmaps nearly everything, so be careful
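
The net behavioural point is visible in the diff: remap_area_pages() is now always called with a cache flag of 0, i.e. _PAGE_CACHED is never set, so every mapping ioremap() creates on nios2 is uncached, and the io.h change turns the variant macros into explicit aliases. A sketch of the resulting equivalence, with phys and len as placeholder values:

	/* After this commit, all of these produce the same uncached mapping: */
	void __iomem *a = ioremap(phys, len);
	void __iomem *b = ioremap_nocache(phys, len);	/* #define ioremap_nocache ioremap */
	void __iomem *c = ioremap_wc(phys, len);	/* #define ioremap_wc ioremap */
	void __iomem *d = ioremap_wt(phys, len);	/* #define ioremap_wt ioremap */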