Commit cccf34e9 authored by Markos Chandras, committed by Ralf Baechle

MIPS: c-r4k: Fix cache flushing for MT cores

MT_SMP is not the only SMP option for MT cores. The MT_SMP option
allows more than one VPE per core to appear as a secondary CPU in the
system. Because of the way the CM (Coherence Manager) works, it
propagates address-based cache ops to the secondary cores but not
index-based ones. The code therefore does not use IPIs to flush the
L1 caches on secondary cores, because the CM will already have done
so. However, the CM functionality is independent of the type of SMP
kernel, so even in non-MT kernels IPIs are not necessary. As a
result, change the conditional to depend on the presence of a CM.
Moreover, since VPEs on the same core share the same L1 caches, there
is no need to send an IPI to all of them, so calculate a suitable
cpumask containing only one VPE per core.
Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
Cc: <stable@vger.kernel.org> # 3.15+
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10654/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 1c885357
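
To make the "one VPE per core" selection concrete, here is a minimal, self-contained C sketch of the algorithm that calculate_cpu_foreign_map() in the diff below implements. It is a userspace illustration only: the cpu_info struct, the four-VPE sample topology, and the unsigned long bitmask standing in for cpumask_t are hypothetical, not part of this patch.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's per-CPU topology data
 * (cpu_data[] in the kernel). */
struct cpu_info {
	int package;
	int core;
};

/* Example topology: four VPEs across two cores in one package. */
static const struct cpu_info cpu_data[] = {
	{ .package = 0, .core = 0 },	/* CPU 0 */
	{ .package = 0, .core = 0 },	/* CPU 1 */
	{ .package = 0, .core = 1 },	/* CPU 2 */
	{ .package = 0, .core = 1 },	/* CPU 3 */
};
#define NR_CPUS ((int)(sizeof(cpu_data) / sizeof(cpu_data[0])))

int main(void)
{
	unsigned long foreign_map = 0;	/* stands in for cpumask_t */
	int i, k, core_present;

	for (i = 0; i < NR_CPUS; i++) {
		core_present = 0;
		/* Is a VPE from this CPU's core already in the mask? */
		for (k = 0; k < NR_CPUS; k++)
			if ((foreign_map & (1UL << k)) &&
			    cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			foreign_map |= 1UL << i;
	}

	/* Prints 0x5: CPUs 0 and 2, one VPE per core. */
	printf("cpu_foreign_map = 0x%lx\n", foreign_map);
	return 0;
}

On this sample topology a flush initiated from CPU 0 would IPI only CPU 2 instead of all three other VPEs, which is the point of the new mask.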
arch/mips/include/asm/smp.h
@@ -23,6 +23,7 @@
 extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
arch/mips/kernel/smp.c
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
 	}
 }
 
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+static inline void calculate_cpu_foreign_map(void)
+{
+	int i, k, core_present;
+	cpumask_t temp_foreign_map;
+
+	/* Re-calculate the mask, starting from an empty one */
+	cpumask_clear(&temp_foreign_map);
+	for_each_online_cpu(i) {
+		core_present = 0;
+		for_each_cpu(k, &temp_foreign_map)
+			if (cpu_data[i].package == cpu_data[k].package &&
+			    cpu_data[i].core == cpu_data[k].core)
+				core_present = 1;
+		if (!core_present)
+			cpumask_set_cpu(i, &temp_foreign_map);
+	}
+
+	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+}
+
 struct plat_smp_ops *mp_ops;
 EXPORT_SYMBOL(mp_ops);
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
 	set_cpu_sibling_map(cpu);
 	set_cpu_core_map(cpu);
 
+	calculate_cpu_foreign_map();
+
 	cpumask_set_cpu(cpu, &cpu_callin_map);
 
 	synchronise_count_slave(cpu);
@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
 static void stop_this_cpu(void *dummy)
 {
 	/*
-	 * Remove this CPU:
+	 * Remove this CPU. Be a bit slow here and
+	 * set the bits for every online CPU so we don't miss
+	 * any IPI whilst taking this VPE down.
 	 */
+
+	cpumask_copy(&cpu_foreign_map, cpu_online_mask);
+
+	/* Make it visible to every other CPU */
+	smp_mb();
+
 	set_cpu_online(smp_processor_id(), false);
+	calculate_cpu_foreign_map();
 	local_irq_disable();
 	while (1);
 }
@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 	set_cpu_core_map(0);
+	calculate_cpu_foreign_map();
 #ifndef CONFIG_HOTPLUG_CPU
 	init_cpu_present(cpu_possible_mask);
 #endif
arch/mips/mm/c-r4k.c
@@ -37,6 +37,7 @@
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
 #include <asm/dma-coherence.h>
+#include <asm/mips-cm.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
 	preempt_disable();
 
-#ifndef CONFIG_MIPS_MT_SMP
-	smp_call_function(func, info, 1);
-#endif
+	/*
+	 * The Coherence Manager propagates address-based cache ops to other
+	 * cores but not index-based ops. However, r4k_on_each_cpu is used
+	 * in both cases, so there is no easy way to tell what kind of op is
+	 * executed on the other cores. The best we can probably do is
+	 * to restrict that call to when a CM is not present, because both
+	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
+	 */
+	if (!mips_cm_present())
+		smp_call_function_many(&cpu_foreign_map, func, info, 1);
 	func(info);
 	preempt_enable();
 }
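
For context, the callers of r4k_on_each_cpu() in c-r4k.c are thin wrappers that hand a per-CPU flush routine to this helper. The sketch below shows the shape of such a caller; the *_example names and the particular blast_* calls are illustrative assumptions, not lines from this patch.

/* Illustrative caller, modelled on the wrappers in c-r4k.c
 * (the real ones use the local_r4k_ prefix and pick the blast
 * variant matching the detected cache geometry). */
static void local_flush_all_example(void *unused)
{
	blast_dcache32();	/* index-based D-cache flush, this CPU only */
	blast_icache32();	/* index-based I-cache flush, this CPU only */
}

static void flush_all_example(void)
{
	/*
	 * Without a CM, r4k_on_each_cpu() IPIs one VPE per foreign
	 * core (via cpu_foreign_map) and then runs the function
	 * locally; with a CM present, only the local call is made.
	 */
	r4k_on_each_cpu(local_flush_all_example, NULL);
}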