Commit ff31e105 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/hash64: Store the slot information at the right offset for hugetlb

The hugetlb pte entries are at the PMD and PUD level, so we can't use
PTRS_PER_PTE to find the second half of the page table. Use the right
offset for PUD/PMD to get to the second half of the table.
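
To make the layout concrete, here is a small userspace model of the bug (illustrative only; the PTRS_PER_* values below are stand-ins, the real values depend on kernel config). On hash64 with 64K pages, a page-table page keeps its slot information one full table of entries past the entry pointer, so indexing a PMD-level table with PTRS_PER_PTE reads the wrong word:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Toy stand-ins for the kernel constants; illustrative values only. */
    #define PTRS_PER_PTE 256
    #define PTRS_PER_PMD 2048

    typedef uint64_t pte_t;

    int main(void)
    {
        /*
         * Model a PMD-level table whose second half holds the hash-slot
         * information: [PTRS_PER_PMD entries][PTRS_PER_PMD slot words].
         */
        pte_t *pmd_table = calloc(2 * PTRS_PER_PMD, sizeof(pte_t));
        pte_t *ptep = &pmd_table[42];          /* some hugetlb entry */

        /* Correct: the slot word sits one full PMD table past ptep. */
        uint64_t *slot_ok  = (uint64_t *)(ptep + PTRS_PER_PMD);
        /* The bug: using PTRS_PER_PTE lands inside the entry area. */
        uint64_t *slot_bad = (uint64_t *)(ptep + PTRS_PER_PTE);

        printf("correct slot index: %td\n", slot_ok - (uint64_t *)pmd_table);
        printf("buggy   slot index: %td\n", slot_bad - (uint64_t *)pmd_table);
        free(pmd_table);
        return 0;
    }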

Fixes: bf9a95f9 ("powerpc: Free up four 64K PTE bits in 64K backed HPTE pages")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Ram Pai <linuxram@us.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 4a7aa4fe
@@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
  * keeping the prototype consistent across the two formats.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-					 unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index, unsigned long hidx,
+					 int offset)
 {
 	return (hidx << H_PAGE_F_GIX_SHIFT) &
 		(H_PAGE_F_SECOND | H_PAGE_F_GIX);
...
@@ -45,7 +45,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
 {
 	real_pte_t rpte;
 	unsigned long *hidxp;
@@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
 	 */
 	smp_rmb();
-	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	hidxp = (unsigned long *)(ptep + offset);
 	rpte.hidx = *hidxp;
 	return rpte;
 }
@@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
  * expected to modify the PTE bits accordingly and commit the PTE to memory.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-					 unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index,
+					 unsigned long hidx, int offset)
 {
-	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	unsigned long *hidxp = (unsigned long *)(ptep + offset);

 	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
 	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
...
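
Taken together, the 64K-format helpers now thread the table size through explicitly. A hedged sketch of the resulting calling pattern (mirroring the .c hunks below; offset must name the size of the table the PTE actually sits in):

    /* sketch: offset is PTRS_PER_PTE, PTRS_PER_PMD or PTRS_PER_PUD */
    rpte = __real_pte(__pte(old_pte), ptep, offset);
    /* ... hash the page, obtain slot ... */
    new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, offset);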
@@ -350,7 +350,7 @@ extern unsigned long pci_io_base;
  */
 #ifndef __real_pte

-#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __real_pte(e, p, o)	((real_pte_t){(e)})
 #define __rpte_to_pte(r)	((r).pte)
 #define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
...
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * need to add in 0x1 if it's a read-only user page
 	 */
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -117,7 +117,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 			return -1;
 		}
 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
...
@@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
 	vpn = hpt_vpn(ea, vsid, ssize);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

 	/*
 	 *None of the sub 4k page is hashed
 	 */
@@ -214,7 +214,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		return -1;
 	}

-	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
+	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
 	new_pte |= H_PAGE_HASHPTE;

 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
@@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -327,7 +327,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		}

 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
...
@@ -27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	unsigned long vpn;
 	unsigned long old_pte, new_pte;
 	unsigned long rflags, pa, sz;
-	long slot;
+	long slot, offset;

 	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
@@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	if (unlikely(mmu_psize == MMU_PAGE_16G))
+		offset = PTRS_PER_PUD;
+	else
+		offset = PTRS_PER_PMD;
+	rpte = __real_pte(__pte(old_pte), ptep, offset);

 	sz = ((1UL) << shift);
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 			return -1;
 		}

-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
 	}

 	/*
...
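
The selection above is the heart of the fix: 16G hugetlb pages are mapped at the PUD level, while the other hugetlb sizes handled here sit at the PMD level. As a hypothetical helper (the patch deliberately open-codes this test at each call site instead), the choice could be written as:

    /* Hypothetical helper, not in the patch; the patch open-codes this. */
    static inline long hugetlb_slot_offset(int mmu_psize)
    {
        if (unlikely(mmu_psize == MMU_PAGE_16G))
            return PTRS_PER_PUD;   /* entry lives in a PUD-level table */
        return PTRS_PER_PMD;       /* other hugetlb entries live at PMD level */
    }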
@@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	unsigned int psize;
 	int ssize;
 	real_pte_t rpte;
-	int i;
+	int i, offset;

 	i = batch->index;

@@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		psize = get_slice_psize(mm, addr);
 		/* Mask the address for the correct page size */
 		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+		if (unlikely(psize == MMU_PAGE_16G))
+			offset = PTRS_PER_PUD;
+		else
+			offset = PTRS_PER_PMD;
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
@@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		 * support 64k pages, this might be different from the
 		 * hardware page size encoded in the slice table. */
 		addr &= PAGE_MASK;
+		offset = PTRS_PER_PTE;
 	}
@@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	}
 	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
-	rpte = __real_pte(__pte(pte), ptep);
+	rpte = __real_pte(__pte(pte), ptep, offset);

 	/*
 	 * Check if we have an active batch on this CPU. If not, just
...