Commit 85e540be authored by Matt Redfearn, committed by Ralf Baechle

MIPS: pm-cps: Use MIPS standard lightweight ordering barrier

Since R2 of the MIPS architecture, SYNC(0x10) has been an optional but
architecturally defined ordering barrier. If a CPU does not implement it,
the arch specifies that it must fall back to SYNC(0).

In places where we require that the instruction stream not be reordered,
but do not require that loads / stores are globally completed, use the
defined standard sync stype.
Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Reviewed-by: Paul Burton <paul.burton@imgtec.com>
Cc: Adam Buchbinder <adam.buchbinder@gmail.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/14221/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 6622ada3
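
For context, a minimal sketch (not part of the patch) of the sync stypes involved. STYPE_SYNC_MB and its 0x10 encoding come from the diff below and the commit message; the header location and the STYPE_SYNC definition are assumptions about the tree at this point.

/*
 * Hypothetical excerpt, assumed to mirror arch/mips/include/asm/barrier.h:
 * stype 0 is the heavyweight completion barrier every implementation
 * provides; stype 0x10 is the architecturally defined lightweight
 * ordering barrier, which CPUs lacking it must treat as SYNC(0).
 */
#define STYPE_SYNC      0x0     /* completion barrier: loads/stores globally performed */
#define STYPE_SYNC_MB   0x10    /* ordering barrier: instruction stream ordering only */

/*
 * The generated entry code can then emit the standard stype directly,
 * instead of looking up a per-CPU stype_ordering value:
 */
uasm_i_sync(&p, STYPE_SYNC_MB);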
@@ -76,7 +76,6 @@ static struct uasm_reloc relocs[32] __initdata;
 /* CPU dependant sync types */
 static unsigned stype_intervention;
 static unsigned stype_memory;
-static unsigned stype_ordering;
 
 enum mips_reg {
         zero, at, v0, v1, a0, a1, a2, a3,
@@ -406,7 +405,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
         if (coupled_coherence) {
                 /* Increment ready_count */
-                uasm_i_sync(&p, stype_ordering);
+                uasm_i_sync(&p, STYPE_SYNC_MB);
                 uasm_build_label(&l, p, lbl_incready);
                 uasm_i_ll(&p, t1, 0, r_nc_count);
                 uasm_i_addiu(&p, t2, t1, 1);
@@ -415,7 +414,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
                 uasm_i_addiu(&p, t1, t1, 1);
                 /* Barrier ensuring all CPUs see the updated r_nc_count value */
-                uasm_i_sync(&p, stype_ordering);
+                uasm_i_sync(&p, STYPE_SYNC_MB);
                 /*
                  * If this is the last VPE to become ready for non-coherence
@@ -568,7 +567,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
         if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
                 /* Decrement ready_count */
                 uasm_build_label(&l, p, lbl_decready);
-                uasm_i_sync(&p, stype_ordering);
+                uasm_i_sync(&p, STYPE_SYNC_MB);
                 uasm_i_ll(&p, t1, 0, r_nc_count);
                 uasm_i_addiu(&p, t2, t1, -1);
                 uasm_i_sc(&p, t2, 0, r_nc_count);
@@ -576,7 +575,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
                 uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
                 /* Barrier ensuring all CPUs see the updated r_nc_count value */
-                uasm_i_sync(&p, stype_ordering);
+                uasm_i_sync(&p, STYPE_SYNC_MB);
         }
         if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
@@ -598,7 +597,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
                 uasm_build_label(&l, p, lbl_secondary_cont);
                 /* Barrier ensuring all CPUs see the updated r_nc_count value */
-                uasm_i_sync(&p, stype_ordering);
+                uasm_i_sync(&p, STYPE_SYNC_MB);
         }
         /* The core is coherent, time to return to C code */
@@ -677,7 +676,6 @@ static int __init cps_pm_init(void)
         case CPU_I6400:
                 stype_intervention = 0x2;
                 stype_memory = 0x3;
-                stype_ordering = 0x10;
                 break;
         default: