Commit d7bca919 authored by Alexei Starovoitov, committed by Daniel Borkmann

mm: Introduce vmap_page_range() to map pages in PCI address space

ioremap_page_range() should only be used for ranges within the vmalloc
range; the vmalloc ranges are allocated by get_vm_area(). PCI has a
"resource" allocator that manages the PCI_IOBASE, IO_SPACE_LIMIT
address range, hence introduce vmap_page_range() to be used exclusively
to map pages in the PCI address space.

Fixes: 3e49a866 ("mm: Enforce VM_IOREMAP flag and range in ioremap_page_range.")
Reported-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Miguel Ojeda <ojeda@kernel.org>
Link: https://lore.kernel.org/bpf/CANiq72ka4rir+RTN2FQoT=Vvprp_Ao-CvoYEkSNqtSY+RZj+AA@mail.gmail.com
parent 96b0f5ad
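[Editor's note] For context, a minimal sketch of the call pattern the new helper is meant for, mirroring the pci_remap_iospace() changes below. This is hypothetical driver code, not part of the patch; the function name example_map_io_window() is illustrative only.

#include <linux/io.h>      /* vmap_page_range(), PCI_IOBASE, IO_SPACE_LIMIT */
#include <linux/ioport.h>  /* struct resource, resource_size() */

/*
 * Hypothetical example: map a bridge's I/O window that lives inside the
 * fixed PCI_IOBASE virtual area. The range is owned by the PCI
 * "resource" allocator rather than by get_vm_area(), which is why
 * vmap_page_range() is the right primitive here.
 */
static int example_map_io_window(const struct resource *res, phys_addr_t phys)
{
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return vmap_page_range(vaddr, vaddr + resource_size(res), phys,
			       pgprot_device(PAGE_KERNEL));
}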
arch/arm/mm/ioremap.c
@@ -110,7 +110,7 @@ void __init add_static_vm_early(struct static_vm *svm)
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
-	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+	return vmap_page_range(virt, virt + PAGE_SIZE, phys,
 				  __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
@@ -466,7 +466,7 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
 				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 EXPORT_SYMBOL(pci_remap_iospace);
arch/loongarch/kernel/setup.c
@@ -490,7 +490,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
 	}
 
 	vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
 
-	ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+	vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
 	return 0;
 }
arch/mips/loongson64/init.c
@@ -180,7 +180,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
 	vaddr = PCI_IOBASE + range->io_start;
 
-	ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+	vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
 	return 0;
 }
arch/powerpc/kernel/isa-bridge.c
@@ -46,7 +46,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
 	WARN_ON_ONCE(size & ~PAGE_MASK);
 
 	if (slab_is_available()) {
-		if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+		if (vmap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
 				pgprot_noncached(PAGE_KERNEL)))
 			vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
 	} else {
drivers/pci/pci.c
@@ -4353,7 +4353,7 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
 				  pgprot_device(PAGE_KERNEL));
 #else
 	/*
include/linux/io.h
@@ -23,12 +23,19 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 #ifdef CONFIG_MMU
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		       phys_addr_t phys_addr, pgprot_t prot);
+int vmap_page_range(unsigned long addr, unsigned long end,
+		    phys_addr_t phys_addr, pgprot_t prot);
 #else
 static inline int ioremap_page_range(unsigned long addr, unsigned long end,
 				     phys_addr_t phys_addr, pgprot_t prot)
 {
 	return 0;
 }
+static inline int vmap_page_range(unsigned long addr, unsigned long end,
+				  phys_addr_t phys_addr, pgprot_t prot)
+{
+	return 0;
+}
 #endif
 
 /*
mm/vmalloc.c
@@ -304,11 +304,24 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
 	return err;
 }
 
+int vmap_page_range(unsigned long addr, unsigned long end,
+		    phys_addr_t phys_addr, pgprot_t prot)
+{
+	int err;
+
+	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
+				 ioremap_max_page_shift);
+	flush_cache_vmap(addr, end);
+	if (!err)
+		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+					       ioremap_max_page_shift);
+	return err;
+}
+
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		phys_addr_t phys_addr, pgprot_t prot)
 {
 	struct vm_struct *area;
-	int err;
 
 	area = find_vm_area((void *)addr);
 	if (!area || !(area->flags & VM_IOREMAP)) {
@@ -322,13 +335,7 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
 			(long)area->addr + get_vm_area_size(area));
 		return -ERANGE;
 	}
-	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
-				 ioremap_max_page_shift);
-	flush_cache_vmap(addr, end);
-	if (!err)
-		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
-					       ioremap_max_page_shift);
-	return err;
+	return vmap_page_range(addr, end, phys_addr, prot);
 }
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
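[Editor's note] Taken together: after this patch, ioremap_page_range() keeps the VM_IOREMAP range check enforced by commit 3e49a866 and forwards to vmap_page_range() for the actual mapping, while callers that own a fixed virtual window call vmap_page_range() directly. A hedged sketch of the two call paths follows; the example_* function names are illustrative only, not from the patch.

#include <linux/io.h>       /* ioremap_page_range(), vmap_page_range(), PCI_IOBASE */
#include <linux/vmalloc.h>  /* get_vm_area(), VM_IOREMAP */

/*
 * Path 1 (illustrative): the range lies inside the vmalloc space and was
 * allocated by get_vm_area() with VM_IOREMAP set, so the check inside
 * ioremap_page_range() passes.
 */
static int example_vmalloc_space_map(phys_addr_t phys, unsigned long size)
{
	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
	unsigned long va;

	if (!area)
		return -ENOMEM;
	va = (unsigned long)area->addr;

	return ioremap_page_range(va, va + size, phys,
				  pgprot_noncached(PAGE_KERNEL));
}

/*
 * Path 2 (illustrative): a fixed window such as the PCI_IOBASE I/O space,
 * managed by a "resource" allocator. find_vm_area() would find no
 * VM_IOREMAP area there, so the new helper is called directly.
 */
static int example_fixed_window_map(phys_addr_t phys, unsigned long size)
{
	unsigned long va = (unsigned long)PCI_IOBASE;

	return vmap_page_range(va, va + size, phys,
			       pgprot_device(PAGE_KERNEL));
}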