Commit f3551520 authored by Will Deacon

Merge branch 'x86/mm' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into aarch64/for-next/core

Pull in core ioremap changes from -tip, since we depend on these for
re-enabling huge I/O mappings on arm64.
Signed-off-by: Will Deacon <will.deacon@arm.com>
parents 693350a7 5e0fb5df
@@ -977,12 +977,12 @@ int pmd_clear_huge(pmd_t *pmdp)
 	return 1;
 }
 
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
 	return pud_none(*pud);
 }
 
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	return pmd_none(*pmd);
 }
@@ -719,28 +719,50 @@ int pmd_clear_huge(pmd_t *pmd)
 	return 0;
 }
 
+#ifdef CONFIG_X86_64
 /**
  * pud_free_pmd_page - Clear pud entry and free pmd page.
  * @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
  *
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
  */
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	pmd_t *pmd;
+	pmd_t *pmd, *pmd_sv;
+	pte_t *pte;
 	int i;
 
 	if (pud_none(*pud))
 		return 1;
 
 	pmd = (pmd_t *)pud_page_vaddr(*pud);
-
-	for (i = 0; i < PTRS_PER_PMD; i++)
-		if (!pmd_free_pte_page(&pmd[i]))
-			return 0;
+	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+	if (!pmd_sv)
+		return 0;
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd_sv[i] = pmd[i];
+		if (!pmd_none(pmd[i]))
+			pmd_clear(&pmd[i]);
+	}
 
 	pud_clear(pud);
+
+	/* INVLPG to clear all paging-structure caches */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (!pmd_none(pmd_sv[i])) {
+			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+			free_page((unsigned long)pte);
+		}
+	}
+
+	free_page((unsigned long)pmd_sv);
 	free_page((unsigned long)pmd);
 
 	return 1;
@@ -749,11 +771,12 @@ int pud_free_pmd_page(pud_t *pud)
 /**
  * pmd_free_pte_page - Clear pmd entry and free pte page.
  * @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
  *
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
  */
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	pte_t *pte;
 
@@ -762,8 +785,30 @@ int pmd_free_pte_page(pmd_t *pmd)
 
 	pte = (pte_t *)pmd_page_vaddr(*pmd);
 	pmd_clear(pmd);
+
+	/* INVLPG to clear all paging-structure caches */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
 	free_page((unsigned long)pte);
 
 	return 1;
 }
+
+#else /* !CONFIG_X86_64 */
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+	return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This assures that ioremap()
+ * does not update sync'd pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+	return pmd_none(*pmd);
+}
+
+#endif	/* CONFIG_X86_64 */
 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
 int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
 #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
 {
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 {
 	return 0;
 }
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
 	return 0;
 }
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	return 0;
 }
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 		if (ioremap_pmd_enabled() &&
 		    ((next - addr) == PMD_SIZE) &&
 		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
-		    pmd_free_pte_page(pmd)) {
+		    pmd_free_pte_page(pmd, addr)) {
 			if (pmd_set_huge(pmd, phys_addr + addr, prot))
 				continue;
 		}
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
 		if (ioremap_pud_enabled() &&
 		    ((next - addr) == PUD_SIZE) &&
 		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
-		    pud_free_pmd_page(pud)) {
+		    pud_free_pmd_page(pud, addr)) {
 			if (pud_set_huge(pud, phys_addr + addr, prot))
 				continue;
 		}
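
These hooks are only called from the ioremap_pmd_range()/ioremap_pud_range() paths above; drivers keep using ioremap()/iounmap() unchanged. As a rough, hypothetical sketch of the caller side that benefits once arm64 re-enables huge I/O mappings on top of this series (the FOO_PHYS_BASE address, the 2 MiB size and the foo_* names are made up and not from this commit; a PMD-level block mapping is only used when the size and alignment allow):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

/* Hypothetical, 2 MiB-aligned MMIO region; purely illustrative. */
#define FOO_PHYS_BASE	0x40000000UL

static void __iomem *foo_regs;

static int foo_map_regs(void)
{
	/*
	 * With huge I/O mappings enabled, a request this large and this
	 * aligned can be backed by a single PMD-level block entry;
	 * otherwise ioremap() simply falls back to 4 KiB PTE mappings.
	 */
	foo_regs = ioremap(FOO_PHYS_BASE, SZ_2M);
	if (!foo_regs)
		return -ENOMEM;
	return 0;
}

static void foo_unmap_regs(void)
{
	iounmap(foo_regs);
}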