Commit 4a45b746 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: refactor ioremap vm area setup.

PPC32 and PPC64 do the same thing once SLAB is available.
Create a do_ioremap() function that calls get_vm_area and
does the mapping.
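
As a rough sketch of the new calling convention (condensed from this patch;
example_map() itself is only an illustrative wrapper, not code added by the
commit): both the PPC32 and PPC64 __ioremap_caller() variants split the
physical address into a page-aligned base plus its in-page offset before
handing it to the shared helper.

	/*
	 * Illustrative sketch only: how a caller prepares its arguments for
	 * do_ioremap().  do_ioremap(), PAGE_MASK and PAGE_ALIGN() are real;
	 * example_map() is hypothetical.
	 */
	static void __iomem *example_map(phys_addr_t addr, unsigned long size,
					 pgprot_t prot, void *caller)
	{
		phys_addr_t paligned = addr & PAGE_MASK;  /* page-aligned physical base */
		phys_addr_t offset = addr & ~PAGE_MASK;   /* in-page offset, re-added to the result */

		/* round the length up so the mapping covers whole pages */
		size = PAGE_ALIGN(addr + size) - paligned;

		return do_ioremap(paligned, offset, size, prot, caller);
	}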

For PPC64, we add the 4K PFN hack sanity check to __ioremap_caller()
in order to avoid using __ioremap_at(). Other checks in __ioremap_at()
are irrelevant for __ioremap_caller().

On PPC64, the VM area is allocated in the range [ioremap_bot ; IOREMAP_END].
On PPC32, the VM area is allocated in the range [VMALLOC_START ; VMALLOC_END].

Let's define IOREMAP_START as ioremap_bot for PPC64, and alias
IOREMAP_START/END to VMALLOC_START/END on PPC32.
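
With those aliases in place, the common helper requests its VM area from a
single call site with no #ifdef; this is the line added further down, in the
do_ioremap() hunk:

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);

so PPC32 keeps drawing from the vmalloc space while PPC64 keeps drawing from
[ioremap_bot ; IOREMAP_END].
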
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/42e7e36ad32e0fdf76692426cc642799c9f689b8.1566309263.git.christophe.leroy@c-s.fr
parent 191e4206
@@ -165,6 +165,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #define IOREMAP_TOP	KVIRT_TOP
 #endif
 
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START	VMALLOC_START
+#define IOREMAP_END	VMALLOC_END
+
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 16MB value just means that there will be a 64MB "hole" after the
...
@@ -316,6 +316,7 @@ extern unsigned long pci_io_base;
 #define PHB_IO_BASE	(ISA_IO_END)
 #define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
+#define IOREMAP_START	(ioremap_bot)
 #define IOREMAP_END	(KERN_IO_END)
 
 /* Advertise special mapping type for AGP */
...
@@ -723,6 +723,8 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 
 int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot);
+void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
+			 pgprot_t prot, void *caller);
 
 extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
 				      pgprot_t prot, void *caller);
...
@@ -93,6 +93,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #define IOREMAP_TOP	KVIRT_TOP
 #endif
 
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START	VMALLOC_START
+#define IOREMAP_END	VMALLOC_END
+
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 16MB value just means that there will be a 64MB "hole" after the
...
@@ -53,6 +53,7 @@
 #define PHB_IO_BASE	(ISA_IO_END)
 #define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
+#define IOREMAP_START	(ioremap_bot)
 #define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
...
@@ -80,3 +80,23 @@ int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t
 
 	return 0;
 }
+
+void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
+			 pgprot_t prot, void *caller)
+{
+	struct vm_struct *area;
+	int ret;
+
+	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
+	if (area == NULL)
+		return NULL;
+
+	area->phys_addr = pa;
+	ret = ioremap_range((unsigned long)area->addr, pa, size, prot);
+	if (!ret)
+		return (void __iomem *)area->addr + offset;
+
+	free_vm_area(area);
+
+	return NULL;
+}
...
@@ -18,7 +18,7 @@ void __iomem *
 __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
 {
 	unsigned long v;
-	phys_addr_t p;
+	phys_addr_t p, offset;
 	int err;
 
 	/*
@@ -28,6 +28,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	 * (ioremap_bot records where we're up to).
 	 */
 	p = addr & PAGE_MASK;
+	offset = addr & ~PAGE_MASK;
 	size = PAGE_ALIGN(addr + size) - p;
 
 	/*
@@ -62,12 +63,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 		goto out;
 
 	if (slab_is_available()) {
-		struct vm_struct *area;
-		area = get_vm_area_caller(size, VM_IOREMAP, caller);
-		if (area == 0)
-			return NULL;
-		area->phys_addr = p;
-		v = (unsigned long)area->addr;
+		return do_ioremap(p, offset, size, prot, caller);
 	} else {
 		v = (ioremap_bot -= size);
 	}
@@ -77,11 +73,8 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	 */
 
 	err = ioremap_range((unsigned long)v, p, size, prot);
-	if (err) {
-		if (slab_is_available())
-			vunmap((void *)v);
+	if (err)
 		return NULL;
-	}
 
 out:
 	return (void __iomem *)(v + ((unsigned long)addr & ~PAGE_MASK));
...
@@ -46,9 +46,13 @@ EXPORT_SYMBOL(__iounmap_at);
 void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
 			       pgprot_t prot, void *caller)
 {
-	phys_addr_t paligned;
+	phys_addr_t paligned, offset;
 	void __iomem *ret;
 
+	/* We don't support the 4K PFN hack with ioremap */
+	if (pgprot_val(prot) & H_PAGE_4K_PFN)
+		return NULL;
+
 	/*
 	 * Choose an address to map it to. Once the vmalloc system is running,
 	 * we use it. Before that, we map using addresses going up from
@@ -56,21 +60,14 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
 	 * through ioremap_bot.
 	 */
 	paligned = addr & PAGE_MASK;
+	offset = addr & ~PAGE_MASK;
 	size = PAGE_ALIGN(addr + size) - paligned;
 
 	if (size == 0 || paligned == 0)
 		return NULL;
 
 	if (slab_is_available()) {
-		struct vm_struct *area;
-
-		area = __get_vm_area_caller(size, VM_IOREMAP, ioremap_bot,
-					    IOREMAP_END, caller);
-		if (area == NULL)
-			return NULL;
-
-		area->phys_addr = paligned;
-		ret = __ioremap_at(paligned, area->addr, size, prot);
+		return do_ioremap(paligned, offset, size, prot, caller);
 	} else {
 		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
 		if (ret)
...