Commit f8e6859e authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:

 1) Support multiple huge page sizes, from Nitin Gupta.

 2) Improve boot time on large memory configurations, from Pavel
    Tatashin.

 3) Make BRK handling more consistent and documented, from Vijay Kumar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Fix build error in flush_tsb_user_page
  sparc64: memblock resizes are not handled properly
  sparc64: use latency groups to improve add_node_ranges speed
  sparc64: Add 64K page size support
  sparc64: Multi-page size support
  Documentation/sparc: Steps for sending break on sunhv console
  sparc64: Send break twice from console to return to boot prom
  sparc64: Migrate hvcons irq to panicked cpu
  sparc64: Set cpu state to offline when stopped
  sunvdc: Add support for setting physical sector size
  sparc64: fix for user probes in high memory
  sparc: topology_64.h: Fix condition for including cpudata.h
  sparc32: mm: srmmu: add __ro_after_init to sparc32_cachetlb_ops structures
parents a682e003 ac65e282
Steps for sending 'break' on sunhv console:
===========================================
On Baremetal:
1. press Esc + 'B'
On LDOM:
1. press Ctrl + ']'
2. telnet> send break
@@ -17,7 +17,8 @@
 #define HPAGE_SHIFT 23
 #define REAL_HPAGE_SHIFT 22
+#define HPAGE_256MB_SHIFT 28
+#define HPAGE_64K_SHIFT 16
 #define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)

 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -26,6 +27,7 @@
 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
+#define HUGE_MAX_HSTATE 3
 #endif

 #ifndef __ASSEMBLY__
...
@@ -375,7 +375,10 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached

 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline unsigned long __pte_huge_mask(void)
+extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+				struct page *page, int writable);
+#define arch_make_huge_pte arch_make_huge_pte
+static inline unsigned long __pte_default_huge_mask(void)
 {
 	unsigned long mask;
@@ -395,12 +398,14 @@ static inline unsigned long __pte_default_huge_mask(void)
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
+	return __pte(pte_val(pte) | __pte_default_huge_mask());
 }

-static inline bool is_hugetlb_pte(pte_t pte)
+static inline bool is_default_hugetlb_pte(pte_t pte)
 {
-	return !!(pte_val(pte) & __pte_huge_mask());
+	unsigned long mask = __pte_default_huge_mask();
+
+	return (pte_val(pte) & mask) == mask;
 }

 static inline bool is_hugetlb_pmd(pmd_t pmd)
@@ -875,10 +880,12 @@ static inline unsigned long pud_pfn(pud_t pud)
 /* Actual page table PTE updates. */
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm);
+		   pte_t *ptep, pte_t orig, int fullmm,
+		   unsigned int hugepage_shift);

 static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-				pte_t *ptep, pte_t orig, int fullmm)
+				pte_t *ptep, pte_t orig, int fullmm,
+				unsigned int hugepage_shift)
 {
 	/* It is more efficient to let flush_tlb_kernel_range()
 	 * handle init_mm tlb flushes.
@@ -887,7 +894,7 @@ static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 	 * and SUN4V pte layout, so this inline test is fine.
 	 */
 	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
 }

 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
@@ -906,7 +913,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;

 	*ptep = pte;
-	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
 }

 #define set_pte_at(mm,addr,ptep,pte) \
...
@@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
 extern atomic_t dcpage_flushes_xcall;

 extern int sysctl_tsb_ratio;
-#endif

+#ifdef CONFIG_SERIAL_SUNHV
+void sunhv_migrate_hvcons_irq(int cpu);
+#endif
+#endif
 void sun_do_break(void);
 extern int stop_a_enabled;
 extern int scons_pwroff;
...
@@ -8,7 +8,7 @@
 #define TLB_BATCH_NR 192

 struct tlb_batch {
-	bool huge;
+	unsigned int hugepage_shift;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;
@@ -17,7 +17,8 @@ struct tlb_batch {
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+			 unsigned int hugepage_shift);

 /* TLB flush operations. */
...
@@ -4,7 +4,6 @@
 #ifdef CONFIG_NUMA

 #include <asm/mmzone.h>
-#include <asm/cpudata.h>

 static inline int cpu_to_node(int cpu)
 {
@@ -42,6 +41,9 @@ int __node_distance(int, int);
 #endif /* !(CONFIG_NUMA) */

 #ifdef CONFIG_SMP
+
+#include <asm/cpudata.h>
+
 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu) (cpu_data(cpu).core_id)
 #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
...
@@ -42,8 +42,8 @@ struct arch_uprobe {
 };

 struct arch_uprobe_task {
-	u32 saved_tpc;
-	u32 saved_tnpc;
+	u64 saved_tpc;
+	u64 saved_tnpc;
 };

 struct task_struct;
...
@@ -1443,6 +1443,7 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 static void stop_this_cpu(void *dummy)
 {
+	set_cpu_online(smp_processor_id(), false);
 	prom_stopself();
 }
@@ -1451,9 +1452,15 @@ void smp_send_stop(void)
 	int cpu;

 	if (tlb_type == hypervisor) {
+		int this_cpu = smp_processor_id();
+#ifdef CONFIG_SERIAL_SUNHV
+		sunhv_migrate_hvcons_irq(this_cpu);
+#endif
 		for_each_online_cpu(cpu) {
-			if (cpu == smp_processor_id())
+			if (cpu == this_cpu)
 				continue;
+
+			set_cpu_online(cpu, false);
 #ifdef CONFIG_SUN_LDOMS
 			if (ldom_domaining_enabled) {
 				unsigned long hv_err;
...
@@ -117,26 +117,11 @@ tsb_miss_page_table_walk_sun4v_fastpath:
 	/* Valid PTE is now in %g5. */

 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-661:	sethi	%uhi(_PAGE_SZALL_4U), %g7
+	sethi	%uhi(_PAGE_PMD_HUGE), %g7
 	sllx	%g7, 32, %g7
-	.section	.sun4v_2insn_patch, "ax"
-	.word	661b
-	mov	_PAGE_SZALL_4V, %g7
-	nop
-	.previous
-
-	and	%g5, %g7, %g2
-
-661:	sethi	%uhi(_PAGE_SZHUGE_4U), %g7
-	sllx	%g7, 32, %g7
-	.section	.sun4v_2insn_patch, "ax"
-	.word	661b
-	mov	_PAGE_SZHUGE_4V, %g7
-	nop
-	.previous
-
-	cmp	%g2, %g7
-	bne,pt	%xcc, 60f
+	andcc	%g5, %g7, %g0
+	be,pt	%xcc, 60f
 	 nop

 	/* It is a huge page, use huge page TSB entry address we
...
@@ -28,6 +28,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 						unsigned long pgoff,
 						unsigned long flags)
 {
+	struct hstate *h = hstate_file(filp);
 	unsigned long task_size = TASK_SIZE;
 	struct vm_unmapped_area_info info;
@@ -38,7 +39,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 	info.length = len;
 	info.low_limit = TASK_UNMAPPED_BASE;
 	info.high_limit = min(task_size, VA_EXCLUDE_START);
-	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);
@@ -58,6 +59,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 					  const unsigned long pgoff,
 					  const unsigned long flags)
 {
+	struct hstate *h = hstate_file(filp);
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
@@ -69,7 +71,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);
@@ -94,6 +96,7 @@ unsigned long
 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long task_size = TASK_SIZE;
@@ -101,7 +104,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;

-	if (len & ~HPAGE_MASK)
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
 	if (len > task_size)
 		return -ENOMEM;
@@ -113,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	}

 	if (addr) {
-		addr = ALIGN(addr, HPAGE_SIZE);
+		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
@@ -127,17 +130,141 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 			pgoff, flags);
 }

+static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	return entry;
+}
+
+static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	unsigned long hugepage_size = _PAGE_SZ4MB_4V;
+
+	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
+
+	switch (shift) {
+	case HPAGE_256MB_SHIFT:
+		hugepage_size = _PAGE_SZ256MB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
+	case HPAGE_SHIFT:
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
+	case HPAGE_64K_SHIFT:
+		hugepage_size = _PAGE_SZ64K_4V;
+		break;
+	default:
+		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
+	}
+
+	pte_val(entry) = pte_val(entry) | hugepage_size;
+	return entry;
+}
+
+static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	if (tlb_type == hypervisor)
+		return sun4v_hugepage_shift_to_tte(entry, shift);
+	else
+		return sun4u_hugepage_shift_to_tte(entry, shift);
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+			 struct page *page, int writeable)
+{
+	unsigned int shift = huge_page_shift(hstate_vma(vma));
+
+	return hugepage_shift_to_tte(entry, shift);
+}
+
+static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
+{
+	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
+	unsigned int shift;
+
+	switch (tte_szbits) {
+	case _PAGE_SZ256MB_4V:
+		shift = HPAGE_256MB_SHIFT;
+		break;
+	case _PAGE_SZ4MB_4V:
+		shift = REAL_HPAGE_SHIFT;
+		break;
+	case _PAGE_SZ64K_4V:
+		shift = HPAGE_64K_SHIFT;
+		break;
+	default:
+		shift = PAGE_SHIFT;
+		break;
+	}
+	return shift;
+}
+
+static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
+{
+	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
+	unsigned int shift;
+
+	switch (tte_szbits) {
+	case _PAGE_SZ256MB_4U:
+		shift = HPAGE_256MB_SHIFT;
+		break;
+	case _PAGE_SZ4MB_4U:
+		shift = REAL_HPAGE_SHIFT;
+		break;
+	case _PAGE_SZ64K_4U:
+		shift = HPAGE_64K_SHIFT;
+		break;
+	default:
+		shift = PAGE_SHIFT;
+		break;
+	}
+	return shift;
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+	unsigned long shift;
+
+	if (tlb_type == hypervisor)
+		shift = sun4v_huge_tte_to_shift(entry);
+	else
+		shift = sun4u_huge_tte_to_shift(entry);
+
+	if (shift == PAGE_SHIFT)
+		WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
+			  pte_val(entry));
+
+	return shift;
+}
+
+static unsigned long huge_tte_to_size(pte_t pte)
+{
+	unsigned long size = 1UL << huge_tte_to_shift(pte);
+
+	if (size == REAL_HPAGE_SIZE)
+		size = HPAGE_SIZE;
+	return size;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
+	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud)
-		pte = (pte_t *)pmd_alloc(mm, pud, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (!pmd)
+			return NULL;
+
+		if (sz == PMD_SHIFT)
+			pte = (pte_t *)pmd;
+		else
+			pte = pte_alloc_map(mm, pmd, addr);
+	}

 	return pte;
 }
@@ -146,49 +273,83 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
+	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, addr);
 	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
-		if (!pud_none(*pud))
-			pte = (pte_t *)pmd_offset(pud, addr);
+		if (!pud_none(*pud)) {
+			pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd)) {
+				if (is_hugetlb_pmd(*pmd))
+					pte = (pte_t *)pmd;
+				else
+					pte = pte_offset_map(pmd, addr);
+			}
+		}
 	}

 	return pte;
 }

 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
+	unsigned int i, nptes, orig_shift, shift;
+	unsigned long size;
 	pte_t orig;

+	size = huge_tte_to_size(entry);
+	shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
+	nptes = size >> shift;
+
 	if (!pte_present(*ptep) && pte_present(entry))
-		mm->context.hugetlb_pte_count++;
+		mm->context.hugetlb_pte_count += nptes;

-	addr &= HPAGE_MASK;
+	addr &= ~(size - 1);
 	orig = *ptep;
-	*ptep = entry;
+	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);
+
+	for (i = 0; i < nptes; i++)
+		ptep[i] = __pte(pte_val(entry) + (i << shift));

-	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
-	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
+	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+	if (size == HPAGE_SIZE)
+		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
+				    orig_shift);
 }

 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
+	unsigned int i, nptes, hugepage_shift;
+	unsigned long size;
 	pte_t entry;

 	entry = *ptep;
+	size = huge_tte_to_size(entry);
+	if (size >= HPAGE_SIZE)
+		nptes = size >> PMD_SHIFT;
+	else
+		nptes = size >> PAGE_SHIFT;
+
+	hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
+		huge_tte_to_shift(entry);
+
 	if (pte_present(entry))
-		mm->context.hugetlb_pte_count--;
+		mm->context.hugetlb_pte_count -= nptes;

-	addr &= HPAGE_MASK;
-	*ptep = __pte(0UL);
+	addr &= ~(size - 1);
+	for (i = 0; i < nptes; i++)
+		ptep[i] = __pte(0UL);

-	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
-	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
+	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+	if (size == HPAGE_SIZE)
+		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
+				    hugepage_shift);

 	return entry;
 }
...
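Annotation (not part of the merge itself): the hugetlbpage.c hunks above let sparc64 back hugetlb mappings with 64K, 8M (the existing default) and 256M pages, selected from the hstate of the mapping. As a rough illustration of what this enables from userspace, here is a minimal sketch that maps one anonymous 256M huge page; it assumes the kernel above was booted with something like hugepagesz=256M hugepages=1, and MAP_HUGE_256MB is derived locally from HPAGE_256MB_SHIFT because libc headers may not define it:

/* Illustrative only -- not part of the kernel patch. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000
#endif
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT	26
#endif
#define MAP_HUGE_256MB	(28 << MAP_HUGE_SHIFT)	/* 28 == HPAGE_256MB_SHIFT */

int main(void)
{
	size_t len = 256UL << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_256MB,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB | MAP_HUGE_256MB)");
		return 1;
	}
	memset(p, 0, len);	/* touch the mapping so the huge page is really faulted in */
	munmap(p, len);
	puts("mapped and touched one 256MB huge page");
	return 0;
}

Mounting hugetlbfs with pagesize=256M and mapping a file from it would exercise the same paths through hugetlb_get_unmapped_area() and arch_make_huge_pte().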
@@ -324,6 +324,50 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }

+#ifdef CONFIG_HUGETLB_PAGE
+static int __init setup_hugepagesz(char *string)
+{
+	unsigned long long hugepage_size;
+	unsigned int hugepage_shift;
+	unsigned short hv_pgsz_idx;
+	unsigned int hv_pgsz_mask;
+	int rc = 0;
+
+	hugepage_size = memparse(string, &string);
+	hugepage_shift = ilog2(hugepage_size);
+
+	switch (hugepage_shift) {
+	case HPAGE_256MB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
+		break;
+	case HPAGE_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
+		break;
+	case HPAGE_64K_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_64K;
+		hv_pgsz_idx = HV_PGSZ_IDX_64K;
+		break;
+	default:
+		hv_pgsz_mask = 0;
+	}
+
+	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
+		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+			hugepage_size);
+		goto out;
+	}
+
+	hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
+	rc = 1;
+
+out:
+	return rc;
+}
+__setup("hugepagesz=", setup_hugepagesz);
+#endif	/* CONFIG_HUGETLB_PAGE */
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
@@ -347,7 +391,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
-	    is_hugetlb_pte(pte)) {
+	    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
 		/* We are fabricating 8MB pages using 4MB real hw pages. */
 		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
@@ -785,13 +829,23 @@ static void __init find_ramdisk(unsigned long phys_base)
 struct node_mem_mask {
 	unsigned long mask;
-	unsigned long val;
+	unsigned long match;
 };
 static struct node_mem_mask node_masks[MAX_NUMNODES];
 static int num_node_masks;

 #ifdef CONFIG_NEED_MULTIPLE_NODES

+struct mdesc_mlgroup {
+	u64 node;
+	u64 latency;
+	u64 match;
+	u64 mask;
+};
+
+static struct mdesc_mlgroup *mlgroups;
+static int num_mlgroups;
+
 int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
@@ -802,78 +856,129 @@ struct mdesc_mblock {
 };
 static struct mdesc_mblock *mblocks;
 static int num_mblocks;
-static int find_numa_node_for_addr(unsigned long pa,
-				   struct node_mem_mask *pnode_mask);

-static unsigned long __init ra_to_pa(unsigned long addr)
+static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
 {
+	struct mdesc_mblock *m = NULL;
 	int i;

 	for (i = 0; i < num_mblocks; i++) {
-		struct mdesc_mblock *m = &mblocks[i];
+		m = &mblocks[i];

 		if (addr >= m->base &&
 		    addr < (m->base + m->size)) {
-			addr += m->offset;
 			break;
 		}
 	}
-	return addr;
+
+	return m;
 }

-static int __init find_node(unsigned long addr)
+static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
 {
-	static bool search_mdesc = true;
-	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
-	static int last_index;
-	int i;
+	int prev_nid, new_nid;

-	addr = ra_to_pa(addr);
-	for (i = 0; i < num_node_masks; i++) {
-		struct node_mem_mask *p = &node_masks[i];
-
-		if ((addr & p->mask) == p->val)
-			return i;
-	}
-	/* The following condition has been observed on LDOM guests because
-	 * node_masks only contains the best latency mask and value.
-	 * LDOM guest's mdesc can contain a single latency group to
-	 * cover multiple address range. Print warning message only if the
-	 * address cannot be found in node_masks nor mdesc.
-	 */
-	if ((search_mdesc) &&
-	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
-		/* find the available node in the mdesc */
-		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
-		numadbg("find_node: latency group for address 0x%lx is %d\n",
-			addr, last_index);
-		if ((last_index < 0) || (last_index >= num_node_masks)) {
-			/* WARN_ONCE() and use default group 0 */
-			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
-			search_mdesc = false;
-			last_index = 0;
+	prev_nid = -1;
+	for ( ; start < end; start += PAGE_SIZE) {
+		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
+			struct node_mem_mask *p = &node_masks[new_nid];
+
+			if ((start & p->mask) == p->match) {
+				if (prev_nid == -1)
+					prev_nid = new_nid;
+				break;
+			}
 		}
+
+		if (new_nid == num_node_masks) {
+			prev_nid = 0;
+			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
+				  start);
+			break;
+		}
+
+		if (prev_nid != new_nid)
+			break;
 	}
+	*nid = prev_nid;

-	return last_index;
+	return start > end ? end : start;
 }

 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 {
-	*nid = find_node(start);
-	start += PAGE_SIZE;
-	while (start < end) {
-		int n = find_node(start);
-
-		if (n != *nid)
-			break;
-		start += PAGE_SIZE;
-	}
-	if (start > end)
-		start = end;
-
-	return start;
+	u64 ret_end, pa_start, m_mask, m_match, m_end;
+	struct mdesc_mblock *mblock;
+	int _nid, i;
+
+	if (tlb_type != hypervisor)
+		return memblock_nid_range_sun4u(start, end, nid);
+
+	mblock = addr_to_mblock(start);
+	if (!mblock) {
+		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
+			  start);
+
+		_nid = 0;
+		ret_end = end;
+		goto done;
+	}
+
+	pa_start = start + mblock->offset;
+	m_match = 0;
+	m_mask = 0;
+
+	for (_nid = 0; _nid < num_node_masks; _nid++) {
+		struct node_mem_mask *const m = &node_masks[_nid];
+
+		if ((pa_start & m->mask) == m->match) {
+			m_match = m->match;
+			m_mask = m->mask;
+			break;
+		}
+	}
+
+	if (num_node_masks == _nid) {
+		/* We could not find NUMA group, so default to 0, but lets
+		 * search for latency group, so we could calculate the correct
+		 * end address that we return
+		 */
+		_nid = 0;
+
+		for (i = 0; i < num_mlgroups; i++) {
+			struct mdesc_mlgroup *const m = &mlgroups[i];
+
+			if ((pa_start & m->mask) == m->match) {
+				m_match = m->match;
+				m_mask = m->mask;
+				break;
+			}
+		}
+
+		if (i == num_mlgroups) {
+			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
+				  start);
+
+			ret_end = end;
+			goto done;
+		}
+	}
+
+	/*
+	 * Each latency group has match and mask, and each memory block has an
+	 * offset. An address belongs to a latency group if its address matches
+	 * the following formula: ((addr + offset) & mask) == match
+	 * It is, however, slow to check every single page if it matches a
+	 * particular latency group. As optimization we calculate end value by
+	 * using bit arithmetics.
+	 */
+	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
+	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
+	ret_end = m_end > end ? end : m_end;
+
+done:
+	*nid = _nid;
+	return ret_end;
 }

 #endif
@@ -914,7 +1019,8 @@ static void init_node_masks_nonnuma(void)
 	numadbg("Initializing tables for non-numa.\n");

-	node_masks[0].mask = node_masks[0].val = 0;
+	node_masks[0].mask = 0;
+	node_masks[0].match = 0;
 	num_node_masks = 1;

 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -932,15 +1038,6 @@ EXPORT_SYMBOL(numa_cpu_lookup_table);
 EXPORT_SYMBOL(numa_cpumask_lookup_table);
 EXPORT_SYMBOL(node_data);

-struct mdesc_mlgroup {
-	u64 node;
-	u64 latency;
-	u64 match;
-	u64 mask;
-};
-
-static struct mdesc_mlgroup *mlgroups;
-static int num_mlgroups;
-
 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
 				   u32 cfg_handle)
 {
@@ -1029,6 +1126,10 @@ int of_node_to_nid(struct device_node *dp)
 static void __init add_node_ranges(void)
 {
 	struct memblock_region *reg;
+	unsigned long prev_max;
+
+memblock_resized:
+	prev_max = memblock.memory.max;

 	for_each_memblock(memory, reg) {
 		unsigned long size = reg->size;
@@ -1048,6 +1149,8 @@ static void __init add_node_ranges(void)
 			memblock_set_node(start, this_end - start,
 					  &memblock.memory, nid);
+			if (memblock.memory.max != prev_max)
+				goto memblock_resized;
 			start = this_end;
 		}
 	}
@@ -1182,41 +1285,6 @@ int __node_distance(int from, int to)
 	return numa_latency[from][to];
 }

-static int find_numa_node_for_addr(unsigned long pa,
-				   struct node_mem_mask *pnode_mask)
-{
-	struct mdesc_handle *md = mdesc_grab();
-	u64 node, arc;
-	int i = 0;
-
-	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
-	if (node == MDESC_NODE_NULL)
-		goto out;
-
-	mdesc_for_each_node_by_name(md, node, "group") {
-		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
-			u64 target = mdesc_arc_target(md, arc);
-			struct mdesc_mlgroup *m = find_mlgroup(target);
-
-			if (!m)
-				continue;
-			if ((pa & m->mask) == m->match) {
-				if (pnode_mask) {
-					pnode_mask->mask = m->mask;
-					pnode_mask->val = m->match;
-				}
-				mdesc_release(md);
-				return i;
-			}
-		}
-		i++;
-	}
-
-out:
-	mdesc_release(md);
-	return -1;
-}
-
 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
 	int i;
@@ -1224,7 +1292,7 @@ static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		struct node_mem_mask *n = &node_masks[i];

-		if ((grp->mask == n->mask) && (grp->match == n->val))
+		if ((grp->mask == n->mask) && (grp->match == n->match))
 			break;
 	}
 	return i;
@@ -1279,10 +1347,10 @@ static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
 	n = &node_masks[num_node_masks++];
 	n->mask = candidate->mask;
-	n->val = candidate->match;
+	n->match = candidate->match;

-	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
-		index, n->mask, n->val, candidate->latency);
+	numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
+		index, n->mask, n->match, candidate->latency);

 	return 0;
 }
@@ -1379,7 +1447,7 @@ static int __init numa_parse_jbus(void)
 		numa_cpu_lookup_table[cpu] = index;
 		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
 		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
-		node_masks[index].val = cpu << 36UL;
+		node_masks[index].match = cpu << 36UL;

 		index++;
 	}
...
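Annotation (not from the merge): the comment introduced in memblock_nid_range() above replaces a page-by-page membership test, ((addr + offset) & mask) == match, with a closed-form end address built from __ffs()/fls64() bit arithmetic. The self-contained sketch below checks that closed form against a brute-force walk; every constant in it is invented for the example, and my_ffs()/my_fls64() merely stand in for the kernel helpers:

/* Illustrative only -- worked example of the latency-group end calculation. */
#include <stdio.h>

static unsigned long my_ffs(unsigned long x)   { return __builtin_ctzl(x); }      /* lowest set bit, 0-based, like __ffs() */
static unsigned long my_fls64(unsigned long x) { return 64 - __builtin_clzl(x); } /* highest set bit, 1-based, like fls64() */

int main(void)
{
	/* Made-up values; only the two-line formula comes from the patch. */
	const unsigned long page   = 0x100;   /* toy "page size" so the check loop stays short */
	const unsigned long mask   = 0x0f00;  /* latency-group mask */
	const unsigned long match  = 0x0300;  /* latency-group match */
	const unsigned long offset = 0x080;   /* mblock RA -> PA offset */
	unsigned long start = 0x1280;         /* RA where the scan begins (inside the group) */
	unsigned long pa_start = start + offset;
	unsigned long m_end, brute;

	/* Closed form: next RA at which the masked PA bits stop equalling 'match'. */
	m_end  = match + (1UL << my_ffs(mask)) - offset;
	m_end += pa_start & ~((1UL << my_fls64(mask)) - 1);

	/* Brute-force equivalent: walk page by page until membership changes. */
	for (brute = start; ((brute + offset) & mask) == match; brute += page)
		;

	printf("closed form: 0x%lx, brute force: 0x%lx\n", m_end, brute);
	return 0;
}

For these made-up values both calculations print 0x1380, the first real address whose translated physical address falls outside the latency group.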
@@ -1444,7 +1444,7 @@ static void poke_viking(void)
 	srmmu_set_mmureg(mreg);
 }

-static struct sparc32_cachetlb_ops viking_ops = {
+static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
 	.cache_all = viking_flush_cache_all,
 	.cache_mm = viking_flush_cache_mm,
 	.cache_page = viking_flush_cache_page,
@@ -1475,7 +1475,7 @@ static struct sparc32_cachetlb_ops viking_ops = {
  * flushes going at once will require SMP locking anyways so there's
  * no real value in trying any harder than this.
  */
-static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
 	.cache_all = viking_flush_cache_all,
 	.cache_mm = viking_flush_cache_mm,
 	.cache_page = viking_flush_cache_page,
@@ -1759,7 +1759,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 	local_ops->sig_insns(mm, insn_addr);
 }

-static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
+static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
 	.cache_all = smp_flush_cache_all,
 	.cache_mm = smp_flush_cache_mm,
 	.cache_page = smp_flush_cache_page,
...
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }

 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec, bool huge)
+			      bool exec, unsigned int hugepage_shift)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
@@ -84,19 +84,19 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}

 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr, huge);
+		flush_tsb_user_page(mm, vaddr, hugepage_shift);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}

 	if (nr == 0) {
 		tb->mm = mm;
-		tb->huge = huge;
+		tb->hugepage_shift = hugepage_shift;
 	}

-	if (tb->huge != huge) {
+	if (tb->hugepage_shift != hugepage_shift) {
 		flush_tlb_pending();
-		tb->huge = huge;
+		tb->hugepage_shift = hugepage_shift;
 		nr = 0;
 	}
@@ -110,10 +110,9 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 }

 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm)
+		   pte_t *ptep, pte_t orig, int fullmm,
+		   unsigned int hugepage_shift)
 {
-	bool huge = is_hugetlb_pte(orig);
-
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -139,7 +138,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
 }

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
...
@@ -86,6 +86,33 @@ static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
 		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }

+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
+				       unsigned long hash_shift,
+				       unsigned long nentries,
+				       unsigned int hugepage_shift)
+{
+	unsigned int hpage_entries;
+	unsigned int i;
+
+	hpage_entries = 1 << (hugepage_shift - hash_shift);
+	for (i = 0; i < hpage_entries; i++)
+		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
+				      nentries);
+}
+
+static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+				 unsigned long tsb, unsigned long nentries,
+				 unsigned int hugepage_shift)
+{
+	unsigned long i;
+
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
+					   nentries, hugepage_shift);
+}
+#endif
+
 void flush_tsb_user(struct tlb_batch *tb)
 {
 	struct mm_struct *mm = tb->mm;
@@ -93,45 +120,61 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (!tb->huge) {
+	if (tb->hugepage_shift < HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+		if (tb->hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
+					     tb->hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
+		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
+				     tb->hugepage_shift);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }

-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+			 unsigned int hugepage_shift)
 {
 	unsigned long nentries, base, flags;

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (!huge) {
+	if (hugepage_shift < HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+		if (hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+					      nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+						   nentries, hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
+		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
+					   nentries, hugepage_shift);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
...
@@ -34,6 +34,7 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);

 #define VDC_TX_RING_SIZE 512
+#define VDC_DEFAULT_BLK_SIZE 512

 #define WAITING_FOR_LINK_UP 0x01
 #define WAITING_FOR_TX_SPACE 0x02
@@ -73,6 +74,7 @@ struct vdc_port {
 	u32 vdisk_size;
 	u8 vdisk_type;
 	u8 vdisk_mtype;
+	u32 vdisk_phys_blksz;

 	char disk_name[32];
 };
@@ -88,6 +90,7 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 /* Ordered from largest major to lowest */
 static struct vio_version vdc_versions[] = {
+	{ .major = 1, .minor = 2 },
 	{ .major = 1, .minor = 1 },
 	{ .major = 1, .minor = 0 },
 };
@@ -271,6 +274,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
 		if (pkt->max_xfer_size < port->max_xfer_size)
 			port->max_xfer_size = pkt->max_xfer_size;
 		port->vdisk_block_size = pkt->vdisk_block_size;
+
+		port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
+		if (vdc_version_supported(port, 1, 2))
+			port->vdisk_phys_blksz = pkt->phys_block_size;
+
 		return 0;
 	} else {
 		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
@@ -754,6 +762,12 @@ static int probe_disk(struct vdc_port *port)
 	if (err)
 		return err;

+	/* Using version 1.2 means vdisk_phys_blksz should be set unless the
+	 * disk is reserved by another system.
+	 */
+	if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
+		return -ENODEV;
+
 	if (vdc_version_supported(port, 1, 1)) {
 		/* vdisk_size should be set during the handshake, if it wasn't
 		 * then the underlying disk is reserved by another system
@@ -829,6 +843,8 @@ static int probe_disk(struct vdc_port *port)
 		}
 	}

+	blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
+
 	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
 	       g->disk_name,
 	       port->vdisk_size, (port->vdisk_size >> (20 - 9)),
@@ -910,7 +926,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	if (err)
 		goto err_out_free_port;

-	port->vdisk_block_size = 512;
+	port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
 	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
 	port->ring_cookies = ((port->max_xfer_size *
 			       port->vdisk_block_size) / PAGE_SIZE) + 2;
...
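Annotation (not from the patch): with vio protocol 1.2 negotiated, sunvdc now forwards the server-reported physical sector size to blk_queue_physical_block_size(), so it becomes visible through the usual block-device ioctls. A small sketch of checking it from userspace; /dev/vdiska is only an example device node:

/* Illustrative only -- queries the sizes the sunvdc change exposes. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKSSZGET, BLKPBSZGET */

int main(void)
{
	const char *dev = "/dev/vdiska";	/* example sunvdc disk node */
	unsigned int phys = 0;
	int logical = 0;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror(dev);
		return 1;
	}
	if (ioctl(fd, BLKSSZGET, &logical) || ioctl(fd, BLKPBSZGET, &phys)) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	printf("%s: logical %d bytes, physical %u bytes\n", dev, logical, phys);
	close(fd);
	return 0;
}

The same value is also exported in /sys/block/<disk>/queue/physical_block_size.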
@@ -116,7 +116,7 @@ static int receive_chars_getchar(struct uart_port *port)
 static int receive_chars_read(struct uart_port *port)
 {
-	int saw_console_brk = 0;
+	static int saw_console_brk;
 	int limit = 10000;

 	while (limit-- > 0) {
@@ -128,6 +128,9 @@ static int receive_chars_read(struct uart_port *port)
 			bytes_read = 0;

 			if (stat == CON_BREAK) {
+				if (saw_console_brk)
+					sun_do_break();
+
 				if (uart_handle_break(port))
 					continue;
 				saw_console_brk = 1;
@@ -151,6 +154,7 @@ static int receive_chars_read(struct uart_port *port)
 		if (port->sysrq != 0 && *con_read_page) {
 			for (i = 0; i < bytes_read; i++)
 				uart_handle_sysrq_char(port, con_read_page[i]);
+			saw_console_brk = 0;
 		}

 		if (port->state == NULL)
@@ -398,6 +402,12 @@ static struct uart_driver sunhv_reg = {
 static struct uart_port *sunhv_port;

+void sunhv_migrate_hvcons_irq(int cpu)
+{
+	/* Migrate hvcons irq to param cpu */
+	irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
+}
+
 /* Copy 's' into the con_write_page, decoding "\n" into
  * "\r\n" along the way. We have to return two lengths
  * because the caller needs to know how much to advance
...
@@ -273,7 +273,8 @@ void panic(const char *fmt, ...)
 		extern int stop_a_enabled;
 		/* Make sure the user can actually press Stop-A (L1-A) */
 		stop_a_enabled = 1;
-		pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n");
+		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
+			 "twice on console to return to the boot prom\n");
 	}
 #endif
 #if defined(CONFIG_S390)
...