Commit f381d571 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: Move ioremap functions out of pgtable_32/64.c

Create ioremap_32.c and ioremap_64.c and move the respective ioremap
functions out of pgtable_32.c and pgtable_64.c.

In the meantime, fix a few comments and change a printk() to
pr_warn(). Also fix a few over-split lines.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b5c8b02ccefd4ede64c61b53cf64fb5dacb35740.1566309263.git.christophe.leroy@c-s.fr
parent 7cd9b317
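
For context before the diff: the functions being moved are the powerpc back end of the kernel's generic MMIO mapping API. As a reminder of what that API does for its users, here is a minimal driver-side sketch; DEMO_PHYS_BASE, DEMO_WIN_SIZE, the 0x10 register offset and demo_probe() are hypothetical and not part of this commit:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_PHYS_BASE	0xc0000000UL	/* hypothetical MMIO window */
#define DEMO_WIN_SIZE	0x1000UL

static int demo_probe(void)
{
	void __iomem *regs;
	u32 val;

	/* Map the physical window into kernel virtual address space. */
	regs = ioremap(DEMO_PHYS_BASE, DEMO_WIN_SIZE);
	if (!regs)
		return -ENOMEM;

	val = readl(regs + 0x10);		/* read a device register */
	writel(val | 0x1, regs + 0x10);		/* set an enable bit */

	iounmap(regs);				/* tear the mapping down */
	return 0;
}

Behind this interface, the 32-bit code below carves virtual space going down from IOREMAP_TOP before vmalloc is up, while the 64-bit code maps within the IOREMAP_BASE..IOREMAP_END window.
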
arch/powerpc/mm/Makefile
@@ -7,7 +7,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 obj-y				:= fault.o mem.o pgtable.o mmap.o \
 				   init_$(BITS).o pgtable_$(BITS).o \
-				   pgtable-frag.o ioremap.o \
+				   pgtable-frag.o ioremap.o ioremap_$(BITS).o \
 				   init-common.o mmu_context.o drmem.o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= nohash/
 obj-$(CONFIG_PPC_BOOK3S_32)	+= book3s32/
arch/powerpc/mm/ioremap_32.c (new file)
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <mm/mmu_decl.h>
+
+void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size)
+{
+	pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
+
+	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wt);
+
+void __iomem *
+__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
+{
+	unsigned long v, i;
+	phys_addr_t p;
+	int err;
+
+	/*
+	 * Choose an address to map it to.
+	 * Once the vmalloc system is running, we use it.
+	 * Before then, we use space going down from IOREMAP_TOP
+	 * (ioremap_bot records where we're up to).
+	 */
+	p = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - p;
+
+	/*
+	 * If the address lies within the first 16 MB, assume it's in ISA
+	 * memory space
+	 */
+	if (p < 16 * 1024 * 1024)
+		p += _ISA_MEM_BASE;
+
+#ifndef CONFIG_CRASH_DUMP
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using.
+	 * mem_init() sets high_memory so only do the check after that.
+	 */
+	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
+	    page_is_ram(__phys_to_pfn(p))) {
+		pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
+			(unsigned long long)p, __builtin_return_address(0));
+		return NULL;
+	}
+#endif
+
+	if (size == 0)
+		return NULL;
+
+	/*
+	 * Is it already mapped? Perhaps overlapped by a previous
+	 * mapping.
+	 */
+	v = p_block_mapped(p);
+	if (v)
+		goto out;
+
+	if (slab_is_available()) {
+		struct vm_struct *area;
+		area = get_vm_area_caller(size, VM_IOREMAP, caller);
+		if (area == 0)
+			return NULL;
+		area->phys_addr = p;
+		v = (unsigned long)area->addr;
+	} else {
+		v = (ioremap_bot -= size);
+	}
+
+	/*
+	 * Should check if it is a candidate for a BAT mapping
+	 */
+	err = 0;
+	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+		err = map_kernel_page(v + i, p + i, prot);
+	if (err) {
+		if (slab_is_available())
+			vunmap((void *)v);
+		return NULL;
+	}
+
+out:
+	return (void __iomem *)(v + ((unsigned long)addr & ~PAGE_MASK));
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+	/*
+	 * If mapped by BATs then there is nothing to do.
+	 * Calling vfree() generates a benign warning.
+	 */
+	if (v_block_mapped((unsigned long)addr))
+		return;
+
+	if (addr > high_memory && (unsigned long)addr < ioremap_bot)
+		vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+}
+EXPORT_SYMBOL(iounmap);
arch/powerpc/mm/ioremap_64.c (new file)
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
+			 pgprot_t prot, int nid)
+{
+	unsigned long i;
+
+	for (i = 0; i < size; i += PAGE_SIZE) {
+		int err = map_kernel_page(ea + i, pa + i, prot);
+
+		if (err) {
+			if (slab_is_available())
+				unmap_kernel_range(ea, size);
+			else
+				WARN_ON_ONCE(1); /* Should clean up */
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Low level function to establish the page tables for an IO mapping
+ */
+void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+{
+	/* We don't support the 4K PFN hack with ioremap */
+	if (pgprot_val(prot) & H_PAGE_4K_PFN)
+		return NULL;
+
+	if ((ea + size) >= (void *)IOREMAP_END) {
+		pr_warn("Outside the supported range\n");
+		return NULL;
+	}
+
+	WARN_ON(pa & ~PAGE_MASK);
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
+	if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
+		return NULL;
+
+	return (void __iomem *)ea;
+}
+EXPORT_SYMBOL(__ioremap_at);
+
+/**
+ * Low level function to tear down the page tables for an IO mapping. This is
+ * used for mappings that are manipulated manually, like partial unmapping of
+ * PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
+	unmap_kernel_range((unsigned long)ea, size);
+}
+EXPORT_SYMBOL(__iounmap_at);
+
+void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
+			       pgprot_t prot, void *caller)
+{
+	phys_addr_t paligned;
+	void __iomem *ret;
+
+	/*
+	 * Choose an address to map it to. Once the vmalloc system is running,
+	 * we use it. Before that, we map using addresses going up from
+	 * ioremap_bot. vmalloc will use the addresses from IOREMAP_BASE
+	 * through ioremap_bot.
+	 */
+	paligned = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - paligned;
+
+	if (size == 0 || paligned == 0)
+		return NULL;
+
+	if (slab_is_available()) {
+		struct vm_struct *area;
+
+		area = __get_vm_area_caller(size, VM_IOREMAP, ioremap_bot,
+					    IOREMAP_END, caller);
+		if (area == NULL)
+			return NULL;
+
+		area->phys_addr = paligned;
+		ret = __ioremap_at(paligned, area->addr, size, prot);
+	} else {
+		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
+		if (ret)
+			ioremap_bot += size;
+	}
+
+	if (ret)
+		ret += addr & ~PAGE_MASK;
+
+	return ret;
+}
+
+/*
+ * Unmap an IO region and remove it from vmalloc'd list.
+ * Access to IO memory should be serialized by driver.
+ */
+void iounmap(volatile void __iomem *token)
+{
+	void *addr;
+
+	if (!slab_is_available())
+		return;
+
+	addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);
+
+	if ((unsigned long)addr < ioremap_bot) {
+		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr);
+		return;
+	}
+
+	vunmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
arch/powerpc/mm/pgtable_32.c
@@ -27,7 +27,6 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
-#include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -35,104 +34,6 @@
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-void __iomem *
-ioremap_wt(phys_addr_t addr, unsigned long size)
-{
-	pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
-
-	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_wt);
-
-void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
-{
-	unsigned long v, i;
-	phys_addr_t p;
-	int err;
-
-	/*
-	 * Choose an address to map it to.
-	 * Once the vmalloc system is running, we use it.
-	 * Before then, we use space going down from IOREMAP_TOP
-	 * (ioremap_bot records where we're up to).
-	 */
-	p = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - p;
-
-	/*
-	 * If the address lies within the first 16 MB, assume it's in ISA
-	 * memory space
-	 */
-	if (p < 16*1024*1024)
-		p += _ISA_MEM_BASE;
-
-#ifndef CONFIG_CRASH_DUMP
-	/*
-	 * Don't allow anybody to remap normal RAM that we're using.
-	 * mem_init() sets high_memory so only do the check after that.
-	 */
-	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
-	    page_is_ram(__phys_to_pfn(p))) {
-		pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
-			(unsigned long long)p, __builtin_return_address(0));
-		return NULL;
-	}
-#endif
-
-	if (size == 0)
-		return NULL;
-
-	/*
-	 * Is it already mapped? Perhaps overlapped by a previous
-	 * mapping.
-	 */
-	v = p_block_mapped(p);
-	if (v)
-		goto out;
-
-	if (slab_is_available()) {
-		struct vm_struct *area;
-		area = get_vm_area_caller(size, VM_IOREMAP, caller);
-		if (area == 0)
-			return NULL;
-		area->phys_addr = p;
-		v = (unsigned long) area->addr;
-	} else {
-		v = (ioremap_bot -= size);
-	}
-
-	/*
-	 * Should check if it is a candidate for a BAT mapping
-	 */
-	err = 0;
-	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-		err = map_kernel_page(v + i, p + i, prot);
-	if (err) {
-		if (slab_is_available())
-			vunmap((void *)v);
-		return NULL;
-	}
-
-out:
-	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-	/*
-	 * If mapped by BATs then there is nothing to do.
-	 * Calling vfree() generates a benign warning.
-	 */
-	if (v_block_mapped((unsigned long)addr))
-		return;
-
-	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
-		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
-}
-EXPORT_SYMBOL(iounmap);
-
 static void __init *early_alloc_pgtable(unsigned long size)
 {
 	void *ptr = memblock_alloc(size, size);
arch/powerpc/mm/pgtable_64.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This file contains ioremap and related functions for 64-bit machines.
+ * This file contains pgtable related functions for 64-bit machines.
  *
  * Derived from arch/ppc64/mm/init.c
  *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -34,7 +34,6 @@
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -100,131 +99,6 @@ unsigned long __pte_frag_size_shift;
 EXPORT_SYMBOL(__pte_frag_size_shift);
 #endif
 
-int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
-{
-	unsigned long i;
-
-	for (i = 0; i < size; i += PAGE_SIZE) {
-		int err = map_kernel_page(ea + i, pa + i, prot);
-
-		if (err) {
-			if (slab_is_available())
-				unmap_kernel_range(ea, size);
-			else
-				WARN_ON_ONCE(1); /* Should clean up */
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * __ioremap_at - Low level function to establish the page tables
- *                for an IO mapping
- */
-void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
-{
-	/* We don't support the 4K PFN hack with ioremap */
-	if (pgprot_val(prot) & H_PAGE_4K_PFN)
-		return NULL;
-
-	if ((ea + size) >= (void *)IOREMAP_END) {
-		pr_warn("Outside the supported range\n");
-		return NULL;
-	}
-
-	WARN_ON(pa & ~PAGE_MASK);
-	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-	WARN_ON(size & ~PAGE_MASK);
-
-	if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
-		return NULL;
-
-	return (void __iomem *)ea;
-}
-
-/**
- * __iounmap_from - Low level function to tear down the page tables
- *                  for an IO mapping. This is used for mappings that
- *                  are manipulated manually, like partial unmapping of
- *                  PCI IOs or ISA space.
- */
-void __iounmap_at(void *ea, unsigned long size)
-{
-	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-	WARN_ON(size & ~PAGE_MASK);
-
-	unmap_kernel_range((unsigned long)ea, size);
-}
-
-void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
-				pgprot_t prot, void *caller)
-{
-	phys_addr_t paligned;
-	void __iomem *ret;
-
-	/*
-	 * Choose an address to map it to.
-	 * Once the imalloc system is running, we use it.
-	 * Before that, we map using addresses going
-	 * up from ioremap_bot.  imalloc will use
-	 * the addresses from ioremap_bot through
-	 * IMALLOC_END
-	 *
-	 */
-	paligned = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - paligned;
-
-	if ((size == 0) || (paligned == 0))
-		return NULL;
-
-	if (slab_is_available()) {
-		struct vm_struct *area;
-
-		area = __get_vm_area_caller(size, VM_IOREMAP,
-					    ioremap_bot, IOREMAP_END,
-					    caller);
-		if (area == NULL)
-			return NULL;
-
-		area->phys_addr = paligned;
-		ret = __ioremap_at(paligned, area->addr, size, prot);
-	} else {
-		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
-		if (ret)
-			ioremap_bot += size;
-	}
-
-	if (ret)
-		ret += addr & ~PAGE_MASK;
-	return ret;
-}
-
-/*
- * Unmap an IO region and remove it from imalloc'd list.
- * Access to IO memory should be serialized by driver.
- */
-void iounmap(volatile void __iomem *token)
-{
-	void *addr;
-
-	if (!slab_is_available())
-		return;
-
-	addr = (void *) ((unsigned long __force)
-			 PCI_FIX_ADDR(token) & PAGE_MASK);
-	if ((unsigned long)addr < ioremap_bot) {
-		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
-		       " at 0x%p\n", addr);
-		return;
-	}
-	vunmap(addr);
-}
-
-EXPORT_SYMBOL(__ioremap_at);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(__iounmap_at);
-
 #ifndef __PAGETABLE_PUD_FOLDED
 /* 4 level page table */
 struct page *pgd_page(pgd_t pgd)
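
The __ioremap_at()/__iounmap_at() pair moved above establishes and tears down PTEs at a caller-chosen virtual address instead of allocating one. A minimal sketch of that pattern; all values below are hypothetical, and real in-tree callers (such as the PCI host-bridge I/O-space setup) derive them from firmware resources:

#include <linux/errno.h>
#include <linux/io.h>

/* Sketch under assumed values; not taken from this commit. */
static int demo_map_io_window(void)
{
	phys_addr_t pa = 0x3fe000000000UL;		/* page-aligned bus address */
	void *ea = (void *)0xd000080000000000UL;	/* reserved virtual base */
	unsigned long size = 0x10000;			/* 64K, page aligned */
	void __iomem *io;

	/* Establish the PTEs at the caller-chosen virtual address. */
	io = __ioremap_at(pa, ea, size, pgprot_noncached(PAGE_KERNEL));
	if (!io)
		return -ENOMEM;

	/* ... use the window, then tear down exactly that range. */
	__iounmap_at(ea, size);
	return 0;
}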