Commit ac65e282 authored by Nitin Gupta, committed by David S. Miller

sparc64: Fix build error in flush_tsb_user_page

Patch "sparc64: Add 64K page size support"
unconditionally used __flush_huge_tsb_one_entry()
which is available only when hugetlb support is
enabled.

This patch also fixes incorrect TSB flushing for 64K
pages in flush_tsb_user(): pages smaller than HPAGE_SIZE
must flush the base TSB, using the huge-entry flush helper
when the page size exceeds PAGE_SIZE.
Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cd429ce2
...@@ -309,7 +309,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, ...@@ -309,7 +309,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
addr &= ~(size - 1); addr &= ~(size - 1);
orig = *ptep; orig = *ptep;
orig_shift = pte_none(orig) ? PAGE_SIZE : huge_tte_to_shift(orig); orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);
for (i = 0; i < nptes; i++) for (i = 0; i < nptes; i++)
ptep[i] = __pte(pte_val(entry) + (i << shift)); ptep[i] = __pte(pte_val(entry) + (i << shift));
...@@ -335,7 +335,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, ...@@ -335,7 +335,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
else else
nptes = size >> PAGE_SHIFT; nptes = size >> PAGE_SHIFT;
hugepage_shift = pte_none(entry) ? PAGE_SIZE : huge_tte_to_shift(entry); hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
huge_tte_to_shift(entry);
if (pte_present(entry)) if (pte_present(entry))
mm->context.hugetlb_pte_count -= nptes; mm->context.hugetlb_pte_count -= nptes;
......
...@@ -120,12 +120,18 @@ void flush_tsb_user(struct tlb_batch *tb) ...@@ -120,12 +120,18 @@ void flush_tsb_user(struct tlb_batch *tb)
spin_lock_irqsave(&mm->context.lock, flags); spin_lock_irqsave(&mm->context.lock, flags);
if (tb->hugepage_shift == PAGE_SHIFT) { if (tb->hugepage_shift < HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor) if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base); base = __pa(base);
__flush_tsb_one(tb, PAGE_SHIFT, base, nentries); if (tb->hugepage_shift == PAGE_SHIFT)
__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE)
else
__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
tb->hugepage_shift);
#endif
} }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
...@@ -152,8 +158,14 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, ...@@ -152,8 +158,14 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor) if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base); base = __pa(base);
__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries, if (hugepage_shift == PAGE_SHIFT)
hugepage_shift); __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
nentries);
#if defined(CONFIG_HUGETLB_PAGE)
else
__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
nentries, hugepage_shift);
#endif
} }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment