Commit bcd896ba authored by David S. Miller

sparc64: Handle hugepage TSB being NULL.

Accommodate the possibility that the TSB might be NULL at
the point that update_mmu_cache() is invoked.  This is
necessary because we will sometimes need to defer the TSB
allocation to the first fault that happens in the 'mm'.

Separate out the hugepage PTE test into a separate function
so that the logic is clearer.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent a55ee1ff
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 	unsigned long tag;
 
+	if (unlikely(!tsb))
+		return;
+
 	tsb += ((address >> tsb_hash_shift) &
 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, tte);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	if ((tlb_type == hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+	    (tlb_type != hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+		return true;
+	return false;
+}
+#endif
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
+	unsigned long flags;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -335,24 +350,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
 #endif
-
-	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
-				address, pte_val(pte));
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);