Commit 618e9ed9 authored by David S. Miller

[SPARC64]: Hypervisor TSB context switching.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent aa9143b9
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -4,6 +4,7 @@
  */
 
 #include <asm/tsb.h>
+#include <asm/hypervisor.h>
 
 	.text
 	.align	32
@@ -233,6 +234,7 @@ tsb_flush:
  * %o1:	TSB register value
  * %o2:	TSB virtual address
  * %o3:	TSB mapping locked PTE
+ * %o4:	Hypervisor TSB descriptor physical address
  *
  * We have to run this whole thing with interrupts
  * disabled so that the current cpu doesn't change
@@ -251,30 +253,40 @@ __tsb_context_switch:
 	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-661:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
-	.section .sun4v_2insn_patch, "ax"
-	.word	661b
+	sethi	%hi(tlb_type), %g1
+	lduw	[%g1 + %lo(tlb_type)], %g1
+	cmp	%g1, 3
+	bne,pt	%icc, 1f
+	 nop
+
+	/* Hypervisor TSB switch. */
 	mov	SCRATCHPAD_UTSBREG1, %g1
 	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	.previous
-
-	membar	#Sync
+	mov	-1, %g2
+	mov	SCRATCHPAD_UTSBREG2, %g1
+	stxa	%g2, [%g1] ASI_SCRATCHPAD
 
-661:	stxa	%o1, [%g1] ASI_IMMU
-	membar	#Sync
-	.section .sun4v_2insn_patch, "ax"
-	.word	661b
+	mov	HV_FAST_MMU_TSB_CTXNON0, %o0
+	mov	1, %o1
+	mov	%o4, %o2
+	ta	HV_FAST_TRAP
+
+	ba,pt	%xcc, 9f
 	 nop
-	nop
-	.previous
 
-	brz	%o2, 9f
+	/* SUN4U TSB switch. */
+1:	mov	TSB_REG, %g1
+	stxa	%o1, [%g1] ASI_DMMU
+	membar	#Sync
+	stxa	%o1, [%g1] ASI_IMMU
+	membar	#Sync
+
+2:	brz	%o2, 9f
 	 nop
 
-	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %o4
+	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
 	mov	TLB_TAG_ACCESS, %g1
-	lduw	[%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
+	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
 	stxa	%o2, [%g1] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
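The heart of the change is the __tsb_context_switch hunk above. Instead of rewriting the two sun4u stxa instructions at boot through the .sun4v_2insn_patch mechanism (removed here), the code now branches at run time on tlb_type; the `cmp %g1, 3` matches `hypervisor`, the highest value of the kernel's tlb_type enumeration. On sun4v the new TSB register value is published in scratchpad register UTSBREG1 for the TLB-miss handlers, the unused second user-TSB slot UTSBREG2 is invalidated with all-ones, and the TSB is then registered with the hypervisor: this commit loads the fast-trap number HV_FAST_MMU_TSB_CTXNON0 into %o0, the TSB count (1) into %o1, and the descriptor's physical address (the incoming %o4) into %o2 before `ta HV_FAST_TRAP`. A rough C rendering of that dispatch follows; every helper name in it is a hypothetical stand-in for the stxa/ta sequences, not kernel API:

/* Hypothetical helpers standing in for the stxa/ta sequences above.
 * The enum values match the kernel's tlb_type enumeration (hence the
 * "cmp %g1, 3" test); the scratchpad offsets are illustrative. */
enum tlb_layout { spitfire = 0, cheetah = 1, cheetah_plus = 2, hypervisor = 3 };
extern enum tlb_layout tlb_type;	/* determined once at boot */

extern void scratchpad_write(unsigned long reg, unsigned long val);	/* stxa ... ASI_SCRATCHPAD */
extern void dmmu_tsb_reg_write(unsigned long val);			/* stxa ... ASI_DMMU */
extern void immu_tsb_reg_write(unsigned long val);			/* stxa ... ASI_IMMU */
extern void hv_mmu_tsb_ctxnon0(unsigned long ntsb, unsigned long descr_pa); /* ta HV_FAST_TRAP */

#define SCRATCHPAD_UTSBREG1	0x10UL
#define SCRATCHPAD_UTSBREG2	0x18UL

void tsb_switch_sketch(unsigned long tsb_reg_val, unsigned long tsb_descr_pa)
{
	if (tlb_type == hypervisor) {
		/* sun4v: publish the new TSB for the software TLB-miss
		 * handlers, invalidate the unused second user-TSB slot
		 * with all-ones, then register the single non-context-0
		 * TSB with the hypervisor. */
		scratchpad_write(SCRATCHPAD_UTSBREG1, tsb_reg_val);
		scratchpad_write(SCRATCHPAD_UTSBREG2, ~0UL);
		hv_mmu_tsb_ctxnon0(1, tsb_descr_pa);
	} else {
		/* sun4u: program the hardware TSB register directly in
		 * both the D-MMU and the I-MMU. */
		dmmu_tsb_reg_write(tsb_reg_val);
		immu_tsb_reg_write(tsb_reg_val);
	}
}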
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -149,7 +149,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		BUG();
 	};
 
-	if (tlb_type == cheetah_plus) {
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
 		/* Physical mapping, no locked TLB entry for TSB. */
 		tsb_reg |= tsb_paddr;
 
@@ -166,6 +166,52 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		mm->context.tsb_map_pte = tte;
 	}
 
+	/* Setup the Hypervisor TSB descriptor. */
+	if (tlb_type == hypervisor) {
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+
+		switch (PAGE_SIZE) {
+		case 8192:
+		default:
+			hp->pgsz_idx = HV_PGSZ_IDX_8K;
+			break;
+
+		case 64 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_64K;
+			break;
+
+		case 512 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_512K;
+			break;
+
+		case 4 * 1024 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+			break;
+		};
+		hp->assoc = 1;
+		hp->num_ttes = tsb_bytes / 16;
+		hp->ctx_idx = 0;
+		switch (PAGE_SIZE) {
+		case 8192:
+		default:
+			hp->pgsz_mask = HV_PGSZ_MASK_8K;
+			break;
+
+		case 64 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_64K;
+			break;
+
+		case 512 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_512K;
+			break;
+
+		case 4 * 1024 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+			break;
+		};
+		hp->tsb_base = tsb_paddr;
+		hp->resv = 0;
+	}
 }
 
 /* The page tables are locked against modifications while this
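setup_tsb_params() now also fills in the descriptor consumed by the mmu_tsb_ctxnon0 hypervisor call made in tsb.S. assoc = 1 declares a direct-mapped TSB, ctx_idx = 0 matches translations against MMU context register zero, and num_ttes is tsb_bytes / 16 because each TSB entry is a 16-byte tag/TTE pair; the initial 8KB TSB thus holds 512 entries. A Linux TSB holds a single base page size, so pgsz_idx (the size used to index the TSB) and pgsz_mask (the sizes stored in it) name the same size. For reference, the descriptor layout, roughly as declared in asm/hypervisor.h:

/* sun4v TSB descriptor, roughly as declared in asm/hypervisor.h;
 * field widths follow the sun4v hypervisor API. */
struct hv_tsb_descr {
	unsigned short	pgsz_idx;	/* page size used to index the TSB    */
	unsigned short	assoc;		/* associativity (1 == direct-mapped) */
	unsigned int	num_ttes;	/* entries; each is a 16-byte         */
					/* tag/TTE pair, hence tsb_bytes / 16 */
	unsigned int	ctx_idx;	/* which context register to match    */
	unsigned int	pgsz_mask;	/* page sizes present in this TSB     */
	unsigned long	tsb_base;	/* physical base address of the TSB   */
	unsigned long	resv;		/* reserved, must be zero             */
};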
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -4,6 +4,7 @@
 #include <linux/config.h>
 #include <asm/page.h>
 #include <asm/const.h>
+#include <asm/hypervisor.h>
 
 /*
  * For the 8k pagesize kernel, use only 10 hw context bits to optimize some
@@ -108,6 +109,7 @@ typedef struct {
 	unsigned long	tsb_reg_val;
 	unsigned long	tsb_map_vaddr;
 	unsigned long	tsb_map_pte;
+	struct hv_tsb_descr	tsb_descr;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -22,14 +22,18 @@ extern void get_new_mmu_context(struct mm_struct *mm);
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
-extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
-				 unsigned long tsb_vaddr, unsigned long tsb_pte);
+extern void __tsb_context_switch(unsigned long pgd_pa,
+				 unsigned long tsb_reg,
+				 unsigned long tsb_vaddr,
+				 unsigned long tsb_pte,
+				 unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
 			     mm->context.tsb_map_vaddr,
-			     mm->context.tsb_map_pte);
+			     mm->context.tsb_map_pte,
+			     __pa(&mm->context.tsb_descr));
 }
 
 extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
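The widened prototype lines up one-for-one with the register list documented at the top of __tsb_context_switch: the SPARC64 C calling convention passes the first six integer arguments in %o0 through %o5, so the new fifth argument tsb_descr_pa arrives in %o4, which the assembly later copies into %o2 when it marshals the hypervisor call. Passing __pa(&mm->context.tsb_descr) needs no allocation at switch time because the descriptor is embedded by value in mm_context_t (the mmu.h hunk above) and so always lives in the kernel's direct-mapped memory. An annotated copy of the prototype follows; the register comments are illustrative, not part of the header:

extern void __tsb_context_switch(unsigned long pgd_pa,		/* arrives in %o0 */
				 unsigned long tsb_reg,		/* %o1 */
				 unsigned long tsb_vaddr,	/* %o2 */
				 unsigned long tsb_pte,		/* %o3 */
				 unsigned long tsb_descr_pa);	/* %o4 */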