Commit 37bda1da authored by Paul Mundt

sh: Convert remaining remap_area_pages() users to ioremap_page_range().

A couple of these were missed.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 79890c51
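For context, a minimal sketch of the call-convention change this commit applies (not part of the commit itself): the old sh-specific remap_area_pages() took a base address, physical address, size, and raw PTE flags, while the generic ioremap_page_range() takes a start address, an end address, the physical address, and a pgprot_t. The helper example_remap() below is hypothetical, and the remap_area_pages() prototype is paraphrased from the call sites in the diff.

#include <linux/io.h>	/* ioremap_page_range(); header location may vary by kernel version */

/* Hypothetical helper illustrating the conversion pattern. */
static int example_remap(unsigned long vaddr, unsigned long paddr,
			 unsigned long size, unsigned long flags)
{
	/*
	 * Old sh-specific call (removed by this series):
	 *   return remap_area_pages(vaddr, paddr, size, flags);
	 *
	 * Generic replacement: pass an explicit end address and wrap the
	 * raw PTE flags in __pgprot() to form the pgprot_t argument.
	 */
	return ioremap_page_range(vaddr, vaddr + size, paddr, __pgprot(flags));
}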
@@ -111,8 +111,9 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
 	vma->phys_addr = map->addr;
-	if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
-			     map->size, flags)) {
+	if (ioremap_page_range((unsigned long)vma->addr,
+			       (unsigned long)vma->addr + map->size,
+			       vma->phys_addr, __pgprot(flags))) {
 		vunmap(vma->addr);
 		return -EAGAIN;
 	}
@@ -176,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
-	ret = __sq_remap(map, flags);
+	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (unlikely(ret != 0))
 		goto out;
@@ -107,7 +107,7 @@ void __init p3_cache_init(void)
 	emit_cache_params();
-	if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
+	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
 		panic("%s failed.", __FUNCTION__);
 	for (i = 0; i < cpu_data->dcache.n_aliases; i++)