Commit dc892fb4 authored by Samuel Holland, committed by Palmer Dabbelt

riscv: Use IPIs for remote cache/TLB flushes by default

An IPI backend is always required in an SMP configuration, but an SBI
implementation is not. For example, SBI will be unavailable when the
kernel runs in M mode. For this reason, consider IPI delivery of cache
and TLB flushes to be the base case, and any other implementation (such
as the SBI remote fence extension) to be an optimization.
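Concretely, the call sites now fall back to the IPI path in the else arm and take the SBI path only when it was explicitly selected; flush_tlb_all() from the tlbflush.c hunk below ends up shaped like this:

	if (riscv_use_sbi_for_rfence())
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
	else
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);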

Generally, if IPIs can be delivered without firmware assistance, they
are assumed to be faster than SBI calls due to the SBI context switch
overhead. However, when SBI is used as the IPI backend, then the context
switch cost must be paid anyway, and performing the cache/TLB flush
directly in the SBI implementation is more efficient than injecting an
interrupt to S-mode. This is the only existing scenario where
riscv_ipi_set_virq_range() is called with use_for_rfence set to false.
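That special case is now handled where it is detected: the sbi-ipi.c hunk below flips the new static key only when the SBI IPI backend itself is registered:

	/*
	 * Use the SBI remote fence extension to avoid
	 * the extra context switch needed to handle IPIs.
	 */
	static_branch_enable(&riscv_sbi_for_rfence);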

sbi_ipi_init() already checks riscv_ipi_have_virq_range(), so it only
calls riscv_ipi_set_virq_range() when no other IPI device is available.
This allows moving the static key and dropping the use_for_rfence
parameter. This decouples the static key from the irqchip driver probe
order.
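As a minimal sketch of that ordering (the guard itself predates this patch; unrelated setup is elided):

	void __init sbi_ipi_init(void)
	{
		/* Another irqchip driver already provided IPIs; keep using it. */
		if (riscv_ipi_have_virq_range())
			return;
		...
		riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
		static_branch_enable(&riscv_sbi_for_rfence);
	}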

Furthermore, the static branch only makes sense when CONFIG_RISCV_SBI is
enabled. Otherwise, IPIs must be used. Add a fallback definition of
riscv_use_sbi_for_rfence() which handles this case and removes the need
to check CONFIG_RISCV_SBI elsewhere, such as in cacheflush.c.
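For example, the flush_icache_all() check in the cacheflush.c hunk below simplifies from

	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
		sbi_remote_fence_i(NULL);

to

	if (riscv_use_sbi_for_rfence())
		sbi_remote_fence_i(NULL);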
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240327045035.368512-4-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent aaa56c8f
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -8,6 +8,7 @@
 #define _ASM_RISCV_PGALLOC_H
 
 #include <linux/mm.h>
+#include <asm/sbi.h>
 #include <asm/tlb.h>
 
 #ifdef CONFIG_MMU
@@ -17,10 +18,10 @@
 static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
 {
-	if (riscv_use_ipi_for_rfence())
-		tlb_remove_page_ptdesc(tlb, pt);
-	else
+	if (riscv_use_sbi_for_rfence())
 		tlb_remove_ptdesc(tlb, pt);
+	else
+		tlb_remove_page_ptdesc(tlb, pt);
 }
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -375,8 +375,12 @@ unsigned long riscv_cached_marchid(unsigned int cpu_id);
 unsigned long riscv_cached_mimpid(unsigned int cpu_id);
 
 #if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
+DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
+#define riscv_use_sbi_for_rfence() \
+	static_branch_unlikely(&riscv_sbi_for_rfence)
 void sbi_ipi_init(void);
 #else
+static inline bool riscv_use_sbi_for_rfence(void) { return false; }
 static inline void sbi_ipi_init(void) { }
 #endif
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -49,12 +49,7 @@ void riscv_ipi_disable(void);
 bool riscv_ipi_have_virq_range(void);
 
 /* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
-void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence);
-
-/* Check if we can use IPIs for remote FENCEs */
-DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
-#define riscv_use_ipi_for_rfence() \
-	static_branch_unlikely(&riscv_ipi_for_rfence)
+void riscv_ipi_set_virq_range(int virq, int nr);
 
 /* Check other CPUs stop or not */
 bool smp_crash_stop_failed(void);
@@ -104,16 +99,10 @@ static inline bool riscv_ipi_have_virq_range(void)
 	return false;
 }
 
-static inline void riscv_ipi_set_virq_range(int virq, int nr,
-					    bool use_for_rfence)
+static inline void riscv_ipi_set_virq_range(int virq, int nr)
 {
 }
 
-static inline bool riscv_use_ipi_for_rfence(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
--- a/arch/riscv/kernel/sbi-ipi.c
+++ b/arch/riscv/kernel/sbi-ipi.c
@@ -13,6 +13,9 @@
 #include <linux/irqdomain.h>
 #include <asm/sbi.h>
 
+DEFINE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
+EXPORT_SYMBOL_GPL(riscv_sbi_for_rfence);
+
 static int sbi_ipi_virq;
 
 static void sbi_ipi_handle(struct irq_desc *desc)
@@ -72,6 +75,12 @@ void __init sbi_ipi_init(void)
 			  "irqchip/sbi-ipi:starting",
 			  sbi_ipi_starting_cpu, NULL);
 
-	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false);
+	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
 	pr_info("providing IPIs using SBI IPI extension\n");
+
+	/*
+	 * Use the SBI remote fence extension to avoid
+	 * the extra context switch needed to handle IPIs.
+	 */
+	static_branch_enable(&riscv_sbi_for_rfence);
 }
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -171,10 +171,7 @@ bool riscv_ipi_have_virq_range(void)
 	return (ipi_virq_base) ? true : false;
 }
 
-DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
-EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);
-
-void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
+void riscv_ipi_set_virq_range(int virq, int nr)
 {
 	int i, err;
@@ -197,12 +194,6 @@ void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
 
 	/* Enabled IPIs for boot CPU immediately */
 	riscv_ipi_enable();
-
-	/* Update RFENCE static key */
-	if (use_for_rfence)
-		static_branch_enable(&riscv_ipi_for_rfence);
-	else
-		static_branch_disable(&riscv_ipi_for_rfence);
 }
 
 static const char * const ipi_names[] = {
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -21,7 +21,7 @@ void flush_icache_all(void)
 {
 	local_flush_icache_all();
 
-	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
+	if (riscv_use_sbi_for_rfence())
 		sbi_remote_fence_i(NULL);
 	else
 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
@@ -69,8 +69,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
-	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
-		   !riscv_use_ipi_for_rfence()) {
+	} else if (riscv_use_sbi_for_rfence()) {
 		sbi_remote_fence_i(&others);
 	} else {
 		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -79,10 +79,10 @@ static void __ipi_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	if (riscv_use_ipi_for_rfence())
-		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
-	else
+	if (riscv_use_sbi_for_rfence())
 		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
+	else
+		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
 }
 
 struct flush_tlb_range_data {
@@ -103,7 +103,6 @@ static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
 			      unsigned long start, unsigned long size,
 			      unsigned long stride)
 {
-	struct flush_tlb_range_data ftd;
 	bool broadcast;
 
 	if (cpumask_empty(cmask))
@@ -119,20 +118,18 @@ static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
 		broadcast = true;
 	}
 
-	if (broadcast) {
-		if (riscv_use_ipi_for_rfence()) {
-			ftd.asid = asid;
-			ftd.start = start;
-			ftd.size = size;
-			ftd.stride = stride;
-			on_each_cpu_mask(cmask,
-					 __ipi_flush_tlb_range_asid,
-					 &ftd, 1);
-		} else
-			sbi_remote_sfence_vma_asid(cmask,
-						   start, size, asid);
-	} else {
+	if (!broadcast) {
 		local_flush_tlb_range_asid(start, size, stride, asid);
+	} else if (riscv_use_sbi_for_rfence()) {
+		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+	} else {
+		struct flush_tlb_range_data ftd;
+
+		ftd.asid = asid;
+		ftd.start = start;
+		ftd.size = size;
+		ftd.stride = stride;
+		on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
 	}
 
 	if (cmask != cpu_online_mask)
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -251,7 +251,7 @@ static int __init clint_timer_init_dt(struct device_node *np)
 	}
 
 	irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
-	riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
+	riscv_ipi_set_virq_range(rc, BITS_PER_BYTE);
 	clint_clear_ipi();
 #endif