Commit 5e9d13bd authored by Jiaxun Yang, committed by Thomas Bogendoerfer

MIPS: Allow relocating exception vectors anywhere

Now the exception vectors for CPS systems are also allocated on the fly
with memblock.

Allocation is attempted within KSEG1 first, then falls back to the low
4G when the CM supports 64-bit addressing.
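
A condensed sketch of that fallback, lifted from the allocate_cps_vecs()
hunk below (error handling elided):

	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
					       0x0, CSEGX_SIZE - 1);
	if (!cps_vec_pa && mips_cm_is64)	/* fall back to the low 4G */
		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
						       0x0, SZ_4G - 1);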

The main reset vector is now generated by uasm, avoiding extensive
runtime patching of prebuilt code. The other exception vectors are
copied into the allocated region afterwards.
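
The shape of the generated vector, condensed from the
mips_cps_build_core_entry() hunk below: it checks for an NMI, programs
Cause/Status, then hands the CCA and GCR base to mips_cps_core_boot()
in the argument registers:

	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);	/* NMI check first */
	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
	/* ... Cause.IV and Status set up via lui/ori + mtc0 ... */
	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK); /* a0 = CCA */
	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);	/* a1 = GCR base */
	uasm_i_jr(&p, GPR_T9);	/* tail-jump to mips_cps_core_boot() */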

    move the 64-bit fix into a separate patch
    fix a cache issue with mips_cps_core_entry
    rewrite the patch to reduce the diff stat
    move the extern declarations into a header
    use the cache address for copying the vectors

gc: use the new CKSEG[01]ADDR_OR_64BIT() macros
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
parent 3391b95c

--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -311,6 +311,7 @@ GCR_CX_ACCESSOR_RW(32, 0x018, other)
 /* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */
 GCR_CX_ACCESSOR_RW(32, 0x020, reset_base)
 #define CM_GCR_Cx_RESET_BASE_BEVEXCBASE	GENMASK(31, 12)
+#define CM_GCR_Cx_RESET_BASE_MODE	BIT(1)
 
 /* GCR_Cx_ID - Identify the current core */
 GCR_CX_ACCESSOR_RO(32, 0x028, id)
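
The new MODE bit selects how the CM interprets the programmed base.
Both encodings, as used by allocate_cps_vecs() in the smp-cps.c hunk
further down (a summary of that code, not a new API):

	/* Vector within KSEG1: program the CKSEG1 virtual address, MODE clear */
	core_entry_reg = CKSEG1ADDR(cps_vec_pa) & CM_GCR_Cx_RESET_BASE_BEVEXCBASE;

	/* Vector in the low 4G (64-bit CM): program the physical address and
	 * set MODE, presumably so the CM treats the base as physical rather
	 * than as a KSEG1 address. */
	core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
			 CM_GCR_Cx_RESET_BASE_MODE;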

--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -24,7 +24,7 @@ struct core_boot_config {
 extern struct core_boot_config *mips_cps_core_bootcfg;
 
-extern void mips_cps_core_entry(void);
+extern void mips_cps_core_boot(int cca, void __iomem *gcr_base);
 extern void mips_cps_core_init(void);
 extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe);
@@ -32,7 +32,12 @@ extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe);
 extern void mips_cps_pm_save(void);
 extern void mips_cps_pm_restore(void);
-extern void *mips_cps_core_entry_patch_end;
+extern void excep_tlbfill(void);
+extern void excep_xtlbfill(void);
+extern void excep_cache(void);
+extern void excep_genex(void);
+extern void excep_intex(void);
+extern void excep_ejtag(void);
 
 #ifdef CONFIG_MIPS_CPS

--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -4,6 +4,7 @@
  * Author: Paul Burton <paul.burton@mips.com>
  */
 
+#include <linux/init.h>
 #include <asm/addrspace.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -82,39 +83,10 @@
 	.endm
 
-	.balign 0x1000
-
-LEAF(mips_cps_core_entry)
-	/*
-	 * These first several instructions will be patched by cps_smp_setup to load the
-	 * CCA to use into register s0 and GCR base address to register s1.
-	 */
-	.rept CPS_ENTRY_PATCH_INSNS
-	nop
-	.endr
-
-	.global mips_cps_core_entry_patch_end
-mips_cps_core_entry_patch_end:
-
-	/* Check whether we're here due to an NMI */
-	mfc0	k0, CP0_STATUS
-	and	k0, k0, ST0_NMI
-	beqz	k0, not_nmi
-	 nop
-
-	/* This is an NMI */
-	PTR_LA	k0, nmi_handler
-	jr	k0
-	 nop
-
-not_nmi:
-	/* Setup Cause */
-	li	t0, CAUSEF_IV
-	mtc0	t0, CP0_CAUSE
-
-	/* Setup Status */
-	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
-	mtc0	t0, CP0_STATUS
+LEAF(mips_cps_core_boot)
+	/* Save CCA and GCR base */
+	move	s0, a0
+	move	s1, a1
 
 	/* We don't know how to do coherence setup on earlier ISA */
 #if MIPS_ISA_REV > 0
@@ -178,49 +150,45 @@ not_nmi:
 	PTR_L	sp, VPEBOOTCFG_SP(v1)
 	jr	t1
 	 nop
-END(mips_cps_core_entry)
+END(mips_cps_core_boot)
 
-	.org 0x200
+	__INIT
 LEAF(excep_tlbfill)
 	DUMP_EXCEP("TLB Fill")
 	b	.
 	 nop
 	END(excep_tlbfill)
 
-	.org 0x280
 LEAF(excep_xtlbfill)
 	DUMP_EXCEP("XTLB Fill")
 	b	.
 	 nop
 	END(excep_xtlbfill)
 
-	.org 0x300
 LEAF(excep_cache)
 	DUMP_EXCEP("Cache")
 	b	.
 	 nop
 	END(excep_cache)
 
-	.org 0x380
 LEAF(excep_genex)
 	DUMP_EXCEP("General")
 	b	.
 	 nop
 	END(excep_genex)
 
-	.org 0x400
LEAF(excep_intex)
 	DUMP_EXCEP("Interrupt")
 	b	.
 	 nop
 	END(excep_intex)
 
-	.org 0x480
 LEAF(excep_ejtag)
 	PTR_LA	k0, ejtag_debug_handler
 	jr	k0
 	 nop
 	END(excep_ejtag)
+	__FINIT
 
 LEAF(mips_cps_core_init)
 #ifdef CONFIG_MIPS_MT_SMP
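
Note why the .org padding can go: the handlers are now ordinary __INIT
code, and setup_cps_vecs() (next hunk) copies them at boot to the
architected BEV-mode offsets from the relocatable base:

	/*
	 * BEV-mode entry points relative to the programmed base, i.e. the
	 * copy offsets used by setup_cps_vecs() below:
	 *	+0x000	reset / core entry	(generated by uasm)
	 *	+0x200	TLB refill		(excep_tlbfill)
	 *	+0x280	XTLB refill		(excep_xtlbfill)
	 *	+0x300	cache error		(excep_cache)
	 *	+0x380	general exception	(excep_genex)
	 *	+0x400	interrupt, Cause.IV=1	(excep_intex)
	 *	+0x480	EJTAG debug		(excep_ejtag)
	 */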

--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -7,6 +7,7 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/memblock.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/hotplug.h>
 #include <linux/slab.h>
@@ -20,12 +21,24 @@
 #include <asm/mipsregs.h>
 #include <asm/pm-cps.h>
 #include <asm/r4kcache.h>
+#include <asm/regdef.h>
 #include <asm/smp.h>
 #include <asm/smp-cps.h>
 #include <asm/time.h>
 #include <asm/uasm.h>
 
+#define BEV_VEC_SIZE	0x500
+#define BEV_VEC_ALIGN	0x1000
+
+enum label_id {
+	label_not_nmi = 1,
+};
+
+UASM_L_LA(_not_nmi)
+
 static DECLARE_BITMAP(core_power, NR_CPUS);
+static uint32_t core_entry_reg;
+static phys_addr_t cps_vec_pa;
 
 struct core_boot_config *mips_cps_core_bootcfg;
 
@@ -34,10 +47,100 @@ static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
 	return min(smp_max_threads, mips_cps_numvps(cluster, core));
 }
 
+static void __init *mips_cps_build_core_entry(void *addr)
+{
+	extern void (*nmi_handler)(void);
+	u32 *p = addr;
+	u32 val;
+	struct uasm_label labels[2];
+	struct uasm_reloc relocs[2];
+	struct uasm_label *l = labels;
+	struct uasm_reloc *r = relocs;
+
+	memset(labels, 0, sizeof(labels));
+	memset(relocs, 0, sizeof(relocs));
+
+	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
+	UASM_i_LA(&p, GPR_T9, ST0_NMI);
+	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);
+
+	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
+	uasm_i_nop(&p);
+	UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);
+
+	uasm_l_not_nmi(&l, p);
+
+	val = CAUSEF_IV;
+	uasm_i_lui(&p, GPR_K0, val >> 16);
+	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
+	uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
+	val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
+	uasm_i_lui(&p, GPR_K0, val >> 16);
+	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
+	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
+	uasm_i_ehb(&p);
+	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
+	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
+#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
+	UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
+#else
+	UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
+#endif
+	uasm_i_jr(&p, GPR_T9);
+	uasm_i_nop(&p);
+
+	uasm_resolve_relocs(relocs, labels);
+
+	return p;
+}
+
+static int __init allocate_cps_vecs(void)
+{
+	/* Try to allocate in KSEG1 first */
+	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
+					       0x0, CSEGX_SIZE - 1);
+
+	if (cps_vec_pa)
+		core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
+					CM_GCR_Cx_RESET_BASE_BEVEXCBASE;
+
+	if (!cps_vec_pa && mips_cm_is64) {
+		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
+						       0x0, SZ_4G - 1);
+		if (cps_vec_pa)
+			core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
+					CM_GCR_Cx_RESET_BASE_MODE;
+	}
+
+	if (!cps_vec_pa)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __init setup_cps_vecs(void)
+{
+	void *cps_vec;
+
+	cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
+	mips_cps_build_core_entry(cps_vec);
+
+	memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
+	memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
+	memcpy(cps_vec + 0x300, &excep_cache, 0x80);
+	memcpy(cps_vec + 0x380, &excep_genex, 0x80);
+	memcpy(cps_vec + 0x400, &excep_intex, 0x80);
+	memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);
+
+	/* Make sure no prefetched data in cache */
+	blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa),
+			       CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
+	bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
+	__sync();
+}
+
 static void __init cps_smp_setup(void)
 {
 	unsigned int nclusters, ncores, nvpes, core_vpes;
-	unsigned long core_entry;
 	int cl, c, v;
 
 	/* Detect & record VPE topology */
@@ -94,10 +197,11 @@ static void __init cps_smp_setup(void)
 	/* Make core 0 coherent with everything */
 	write_gcr_cl_coherence(0xff);
 
-	if (mips_cm_revision() >= CM_REV_CM3) {
-		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
-		write_gcr_bev_base(core_entry);
-	}
+	if (allocate_cps_vecs())
+		pr_err("Failed to allocate CPS vectors\n");
+
+	if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
+		write_gcr_bev_base(core_entry_reg);
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -110,10 +214,14 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned ncores, core_vpes, c, cca;
 	bool cca_unsuitable, cores_limited;
-	u32 *entry_code;
 
 	mips_mt_set_cpuoptions();
 
+	if (!core_entry_reg) {
+		pr_err("core_entry address unsuitable, disabling smp-cps\n");
+		goto err_out;
+	}
+
 	/* Detect whether the CCA is unsuited to multi-core SMP */
 	cca = read_c0_config() & CONF_CM_CMASK;
 	switch (cca) {
@@ -145,20 +253,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 		(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
 		cpu_has_dc_aliases ? "dcache aliasing" : "");
 
-	/*
-	 * Patch the start of mips_cps_core_entry to provide:
-	 *
-	 * s0 = kseg0 CCA
-	 */
-	entry_code = (u32 *)&mips_cps_core_entry;
-	uasm_i_addiu(&entry_code, 16, 0, cca);
-	UASM_i_LA(&entry_code, 17, (long)mips_gcr_base);
-	BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end);
-	blast_dcache_range((unsigned long)&mips_cps_core_entry,
-			   (unsigned long)entry_code);
-	bc_wback_inv((unsigned long)&mips_cps_core_entry,
-		     (void *)entry_code - (void *)&mips_cps_core_entry);
-	__sync();
+	setup_cps_vecs();
 
 	/* Allocate core boot configuration structs */
 	ncores = mips_cps_numcores(0);
@@ -213,7 +308,7 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
 	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 	/* Set its reset vector */
-	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+	write_gcr_co_reset_base(core_entry_reg);
 
 	/* Ensure its coherency is disabled */
 	write_gcr_co_coherence(0);
@@ -290,7 +385,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
 	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
-	unsigned long core_entry;
 	unsigned int remote;
 	int err;
 
@@ -314,8 +408,7 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 	if (cpu_has_vp) {
 		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
-		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
-		write_gcr_co_reset_base(core_entry);
+		write_gcr_co_reset_base(core_entry_reg);
 		mips_cm_unlock_other();
 	}
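
One subtlety in setup_cps_vecs() above: the vectors are written through
the uncached CKSEG1 alias, but stale lines for the same physical range
may still sit in the caches under the cached alias, so both the L1
dcache and the L2 (via the CM) are invalidated before any core is
pointed at the new base. A minimal sketch of that pattern, wrapped in a
hypothetical helper (sync_cps_vec() is not part of the patch):

	static void __init sync_cps_vec(phys_addr_t pa, size_t len)
	{
		unsigned long kva = CKSEG0ADDR_OR_64BIT(pa);	/* cached alias */

		blast_inv_dcache_range(kva, kva + len);		/* L1 dcache */
		bc_inv(kva, len);				/* L2/L3 via the CM */
		__sync();
	}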