Commit d6d04819 authored by Daniel Hellstrom, committed by David S. Miller

sparc32: implement SMP IPIs using the generic functions

The current sparc32 SMP IPI generation is implemented using the
cross call function. The cross call function uses IRQ15, the
NMI; this has the effect that IPIs will interrupt IRQ-critical
areas and hang the system. Typically, code on/after
spin_lock_irqsave calls can be aborted.

The cross call functionality must still exist, to flush
caches/TLBs.

This patch gives each CPU model a custom way to implement
generation of IPIs on the generic code's request. The
typical approach is to generate an IRQ for each IPI case.

After this patch each sparc32 SMP CPU model needs to
implement IPIs in order to function properly (a hedged
sketch of one possible hook-up follows the commit metadata below).
Signed-off-by: Daniel Hellstrom <daniel@gaisler.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2645e721
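A minimal sketch of how one sparc32 CPU model might satisfy the new hooks is shown below. Only the BTFIXUP slot names (smp_ipi_resched, smp_ipi_single, smp_ipi_mask_one) and the BTFIXUP machinery come from this commit; the my_ipi_* helpers, trigger_soft_irq() and the MY_IPI_*_IRQ numbers are hypothetical placeholders, not part of the patch.

/*
 * Hedged sketch, not part of this commit: one way a CPU model could
 * implement the three new IPI generation hooks.  trigger_soft_irq(),
 * my_ipi_* and MY_IPI_*_IRQ are hypothetical names.
 */
static void my_ipi_resched(int cpu)
{
	/* Raise a normal (non-NMI) soft IRQ on the target CPU; its trap
	 * handler only needs to do trap entry/return so the reschedule
	 * happens on the interrupt-return path. */
	trigger_soft_irq(cpu, MY_IPI_RESCHED_IRQ);
}

static void my_ipi_single(int cpu)
{
	/* Handler for this IRQ calls smp_call_function_single_interrupt(). */
	trigger_soft_irq(cpu, MY_IPI_SINGLE_IRQ);
}

static void my_ipi_mask_one(int cpu)
{
	/* Per-CPU leg of a mask call; handler calls smp_call_function_interrupt(). */
	trigger_soft_irq(cpu, MY_IPI_MASK_IRQ);
}

static void __init my_model_ipi_init(void)
{
	/* Plug the model-specific generators into the BTFIXUP slots
	 * declared in smp_32.h by this commit. */
	BTFIXUPSET_CALL(smp_ipi_resched, my_ipi_resched, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_single, my_ipi_single, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_mask_one, my_ipi_mask_one, BTFIXUPCALL_NORM);
}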
@@ -28,7 +28,7 @@ config SPARC
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
select USE_GENERIC_SMP_HELPERS if SMP
config SPARC32
def_bool !64BIT
@@ -47,7 +47,6 @@ config SPARC64
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_SYSCALL_TRACEPOINTS
select USE_GENERIC_SMP_HELPERS if SMP
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
@@ -16,6 +16,10 @@ typedef struct {
unsigned long clock_tick;
unsigned int multiplier;
unsigned int counter;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
#endif
int prom_node;
int mid;
int next;
@@ -23,5 +27,6 @@ typedef struct {
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
#endif /* _SPARC_CPUDATA_H */
@@ -50,12 +50,19 @@ void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);
void smp_resched_interrupt(void);
void smp_call_function_single_interrupt(void);
void smp_call_function_interrupt(void);
struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);
BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
BTFIXUPDEF_CALL(void, smp_ipi_resched, int);
BTFIXUPDEF_CALL(void, smp_ipi_single, int);
BTFIXUPDEF_CALL(void, smp_ipi_mask_one, int);
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)
@@ -73,19 +80,8 @@ static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
{
xc1((smpfunc_t)func, (unsigned long)info);
return 0;
}
static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
void *info, int wait)
{
smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid),
(unsigned long) info, 0, 0, 0);
return 0;
}
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
static inline int cpu_logical_map(int cpu)
{
@@ -206,6 +206,16 @@ int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
seq_printf(p, " IPI rescheduling interrupts\n");
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
seq_printf(p, " IPI function call interrupts\n");
#endif
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_data(j).counter);
@@ -123,13 +123,58 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
void smp_send_reschedule(int cpu)
{
/* See sparc64 */
/*
* CPU model dependent way of implementing IPI generation targeting
* a single CPU. The trap handler needs only to do trap entry/return
* to call schedule.
*/
BTFIXUP_CALL(smp_ipi_resched)(cpu);
}
void smp_send_stop(void)
{
}
void arch_send_call_function_single_ipi(int cpu)
{
/* trigger one IPI single call on one CPU */
BTFIXUP_CALL(smp_ipi_single)(cpu);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
/* trigger IPI mask call on each CPU */
for_each_cpu(cpu, mask)
BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
}
void smp_resched_interrupt(void)
{
local_cpu_data().irq_resched_count++;
/*
* do nothing, since it all was about calling re-schedule
* routine called by interrupt return code.
*/
}
void smp_call_function_single_interrupt(void)
{
irq_enter();
generic_smp_call_function_single_interrupt();
local_cpu_data().irq_call_count++;
irq_exit();
}
void smp_call_function_interrupt(void)
{
irq_enter();
generic_smp_call_function_interrupt();
local_cpu_data().irq_call_count++;
irq_exit();
}
void smp_flush_cache_all(void)
{
xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
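For reference, with the new per-CPU counters and the arch_show_interrupts() additions above, /proc/interrupts on a two-CPU sparc32 machine would gain lines roughly like the following (the counts are purely illustrative):

RES:       4242       3137  IPI rescheduling interrupts
CAL:        812        790  IPI function call interrupts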