Commit 7777b939 authored by Jayachandran C, committed by Ralf Baechle

MIPS: Allow platform specific scratch registers

XLR/XLP COP0 scratch is register 22, sel 0-7. Add a function
c0_kscratch() which returns the scratch register for the platform,
and use the return value while generating TLB handlers.

Setup kscratch_mask to 0xf for XLR/XLP since the config4 register
does not exist. This allows the kernel to allocate scratch registers
0-3 if needed.
Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5445/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 8ecd0837
...@@ -959,6 +959,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) ...@@ -959,6 +959,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
set_isa(c, MIPS_CPU_ISA_M64R1); set_isa(c, MIPS_CPU_ISA_M64R1);
c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
} }
c->kscratch_mask = 0xf;
} }
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
......
...@@ -305,6 +305,17 @@ static int check_for_high_segbits __cpuinitdata; ...@@ -305,6 +305,17 @@ static int check_for_high_segbits __cpuinitdata;
static unsigned int kscratch_used_mask __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata;
/*
 * Return the CP0 register number used for kernel scratch on this CPU.
 *
 * Netlogic XLR/XLP parts place their scratch registers at CP0 register
 * 22 (sel 0-7); every other supported CPU uses the architectural
 * KScratch location, CP0 register 31.
 */
static inline int __maybe_unused c0_kscratch(void)
{
	int cpu = current_cpu_type();

	if (cpu == CPU_XLP || cpu == CPU_XLR)
		return 22;

	return 31;
}
static int __cpuinit allocate_kscratch(void) static int __cpuinit allocate_kscratch(void)
{ {
int r; int r;
...@@ -336,7 +347,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p) ...@@ -336,7 +347,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
if (scratch_reg > 0) { if (scratch_reg > 0) {
/* Save in CPU local C0_KScratch? */ /* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, 31, scratch_reg); UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
r.r1 = K0; r.r1 = K0;
r.r2 = K1; r.r2 = K1;
r.r3 = 1; r.r3 = 1;
...@@ -385,7 +396,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p) ...@@ -385,7 +396,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
static void __cpuinit build_restore_work_registers(u32 **p) static void __cpuinit build_restore_work_registers(u32 **p)
{ {
if (scratch_reg > 0) { if (scratch_reg > 0) {
UASM_i_MFC0(p, 1, 31, scratch_reg); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return; return;
} }
/* K0 already points to save area, restore $1 and $2 */ /* K0 already points to save area, restore $1 and $2 */
...@@ -674,7 +685,7 @@ static __cpuinit void build_restore_pagemask(u32 **p, ...@@ -674,7 +685,7 @@ static __cpuinit void build_restore_pagemask(u32 **p,
uasm_il_b(p, r, lid); uasm_il_b(p, r, lid);
} }
if (scratch_reg > 0) if (scratch_reg > 0)
UASM_i_MFC0(p, 1, 31, scratch_reg); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else else
UASM_i_LW(p, 1, scratchpad_offset(0), 0); UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else { } else {
...@@ -817,7 +828,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, ...@@ -817,7 +828,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
if (pgd_reg != -1) { if (pgd_reg != -1) {
/* pgd is in pgd_reg */ /* pgd is in pgd_reg */
UASM_i_MFC0(p, ptr, 31, pgd_reg); UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
} else { } else {
/* /*
* &pgd << 11 stored in CONTEXT [23..63]. * &pgd << 11 stored in CONTEXT [23..63].
...@@ -930,7 +941,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, ...@@ -930,7 +941,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
if (mode == refill_scratch) { if (mode == refill_scratch) {
if (scratch_reg > 0) if (scratch_reg > 0)
UASM_i_MFC0(p, 1, 31, scratch_reg); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else else
UASM_i_LW(p, 1, scratchpad_offset(0), 0); UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else { } else {
...@@ -1096,7 +1107,7 @@ struct mips_huge_tlb_info { ...@@ -1096,7 +1107,7 @@ struct mips_huge_tlb_info {
static struct mips_huge_tlb_info __cpuinit static struct mips_huge_tlb_info __cpuinit
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp, struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr, int c0_scratch) unsigned int ptr, int c0_scratch_reg)
{ {
struct mips_huge_tlb_info rv; struct mips_huge_tlb_info rv;
unsigned int even, odd; unsigned int even, odd;
...@@ -1110,12 +1121,12 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, ...@@ -1110,12 +1121,12 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_MFC0(p, tmp, C0_BADVADDR); UASM_i_MFC0(p, tmp, C0_BADVADDR);
if (pgd_reg != -1) if (pgd_reg != -1)
UASM_i_MFC0(p, ptr, 31, pgd_reg); UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else else
UASM_i_MFC0(p, ptr, C0_CONTEXT); UASM_i_MFC0(p, ptr, C0_CONTEXT);
if (c0_scratch >= 0) if (c0_scratch_reg >= 0)
UASM_i_MTC0(p, scratch, 31, c0_scratch); UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0); UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
...@@ -1130,14 +1141,14 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, ...@@ -1130,14 +1141,14 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
} }
} else { } else {
if (pgd_reg != -1) if (pgd_reg != -1)
UASM_i_MFC0(p, ptr, 31, pgd_reg); UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else else
UASM_i_MFC0(p, ptr, C0_CONTEXT); UASM_i_MFC0(p, ptr, C0_CONTEXT);
UASM_i_MFC0(p, tmp, C0_BADVADDR); UASM_i_MFC0(p, tmp, C0_BADVADDR);
if (c0_scratch >= 0) if (c0_scratch_reg >= 0)
UASM_i_MTC0(p, scratch, 31, c0_scratch); UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0); UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
...@@ -1242,8 +1253,8 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, ...@@ -1242,8 +1253,8 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
} }
UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
if (c0_scratch >= 0) { if (c0_scratch_reg >= 0) {
UASM_i_MFC0(p, scratch, 31, c0_scratch); UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
build_tlb_write_entry(p, l, r, tlb_random); build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p); uasm_l_leave(l, *p);
rv.restore_scratch = 1; rv.restore_scratch = 1;
...@@ -1490,7 +1501,7 @@ static void __cpuinit build_r4000_setup_pgd(void) ...@@ -1490,7 +1501,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
} else { } else {
/* PGD in c0_KScratch */ /* PGD in c0_KScratch */
uasm_i_jr(&p, 31); uasm_i_jr(&p, 31);
UASM_i_MTC0(&p, a0, 31, pgd_reg); UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
} }
if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)) if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
panic("tlbmiss_handler_setup_pgd_array space exceeded"); panic("tlbmiss_handler_setup_pgd_array space exceeded");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment