Commit e7b5771a authored by Marc Zyngier

Merge branch irq/riscv-ipi into irq/irqchip-next

* irq/riscv-ipi:
  : .
  : RISC-V IPI rework from Anup Patel:
  :
  : "This series aims to improve IPI support in Linux RISC-V in following ways:
  :  1) Treat IPIs as normal per-CPU interrupts instead of having custom RISC-V
  :     specific hooks. This also makes Linux RISC-V IPI support aligned with
  :     other architectures.
  :  2) Remote TLB flushes and icache flushes should prefer local IPIs instead
  :     of SBI calls whenever we have specialized hardware (such as RISC-V AIA
  :     IMSIC and RISC-V SWI) which allows S-mode software to directly inject
  :     IPIs without any assistance from M-mode runtime firmware."
  : .
  irqchip/riscv-intc: Add empty irq_eoi() for chained irq handlers
  RISC-V: Use IPIs for remote icache flush when possible
  RISC-V: Use IPIs for remote TLB flush when possible
  RISC-V: Allow marking IPIs as suitable for remote FENCEs
  RISC-V: Treat IPIs as normal Linux IRQs
  irqchip/riscv-intc: Allow drivers to directly discover INTC hwnode
  RISC-V: Clear SIP bit only when using SBI IPI operations
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents f39157b3 f8415f2d
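
For orientation before the diff: an irqchip driver that owns the hart-local software interrupt now creates a block of virtual per-CPU IRQs on top of it via the generic IPI mux and hands that range to the arch code. The sketch below only illustrates that pattern; the names prefixed with my_ are placeholders and not part of this series — the real implementations are the new sbi-ipi.c file and the timer-clint.c changes further down.

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

/* Hypothetical hardware hook: raise the HW IPI targeting @cpu. */
static void my_hw_send_ipi(unsigned int cpu)
{
	/* e.g. write the hart's IPI register or make an SBI call */
}

/* Chained handler on the single HW IPI: clear it, then demux. */
static void my_ipi_hw_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/* clear the hardware IPI pending state here */
	ipi_mux_process();
	chained_irq_exit(chip, desc);
}

static void my_ipi_init(unsigned int my_ipi_hw_irq)
{
	/* BITS_PER_BYTE virtual per-CPU IPIs multiplexed over one HW IPI */
	int virq = ipi_mux_create(BITS_PER_BYTE, my_hw_send_ipi);

	if (virq <= 0)
		return;

	irq_set_chained_handler(my_ipi_hw_irq, my_ipi_hw_handler);

	/* last argument: whether these IPIs may carry remote fences */
	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, true);
}
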
@@ -63,6 +63,8 @@ config RISCV
 	select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
 	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_IOREMAP if MMU
+	select GENERIC_IRQ_IPI if SMP
+	select GENERIC_IRQ_IPI_MUX if SMP
 	select GENERIC_IRQ_MULTI_HANDLER
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
......
@@ -12,6 +12,10 @@
 #include <asm-generic/irq.h>
 
+void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
+
+struct fwnode_handle *riscv_get_intc_hwnode(void);
+
 extern void __init init_IRQ(void);
 
 #endif /* _ASM_RISCV_IRQ_H */
@@ -271,8 +271,7 @@ long sbi_get_marchid(void);
 long sbi_get_mimpid(void);
 
 void sbi_set_timer(uint64_t stime_value);
 void sbi_shutdown(void);
-void sbi_clear_ipi(void);
-int sbi_send_ipi(const struct cpumask *cpu_mask);
+void sbi_send_ipi(unsigned int cpu);
 int sbi_remote_fence_i(const struct cpumask *cpu_mask);
 int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
 			  unsigned long start,
@@ -335,4 +334,10 @@ unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
 unsigned long riscv_cached_marchid(unsigned int cpu_id);
 unsigned long riscv_cached_mimpid(unsigned int cpu_id);
 
+#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
+void sbi_ipi_init(void);
+#else
+static inline void sbi_ipi_init(void) { }
+#endif
+
 #endif /* _ASM_RISCV_SBI_H */
@@ -15,12 +15,10 @@
 struct seq_file;
 extern unsigned long boot_cpu_hartid;
 
-struct riscv_ipi_ops {
-	void (*ipi_inject)(const struct cpumask *target);
-	void (*ipi_clear)(void);
-};
-
 #ifdef CONFIG_SMP
+
+#include <linux/jump_label.h>
+
 /*
  * Mapping between linux logical cpu index and hartid.
  */
@@ -33,9 +31,6 @@ void show_ipi_stats(struct seq_file *p, int prec);
 /* SMP initialization hook for setup_arch */
 void __init setup_smp(void);
 
-/* Called from C code, this handles an IPI. */
-void handle_IPI(struct pt_regs *regs);
-
 /* Hook for the generic smp_call_function_many() routine. */
 void arch_send_call_function_ipi_mask(struct cpumask *mask);
@@ -44,11 +39,22 @@ void arch_send_call_function_single_ipi(int cpu);
 int riscv_hartid_to_cpuid(unsigned long hartid);
 
-/* Set custom IPI operations */
-void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
+/* Enable IPI for CPU hotplug */
+void riscv_ipi_enable(void);
+
+/* Disable IPI for CPU hotplug */
+void riscv_ipi_disable(void);
 
-/* Clear IPI for current CPU */
-void riscv_clear_ipi(void);
+/* Check if IPI interrupt numbers are available */
+bool riscv_ipi_have_virq_range(void);
+
+/* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
+void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence);
+
+/* Check if we can use IPIs for remote FENCEs */
+DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
+#define riscv_use_ipi_for_rfence() \
+	static_branch_unlikely(&riscv_ipi_for_rfence)
 
 /* Check other CPUs stop or not */
 bool smp_crash_stop_failed(void);
@@ -85,14 +91,29 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
 	return boot_cpu_hartid;
 }
 
-static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
+static inline void riscv_ipi_enable(void)
+{
+}
+
+static inline void riscv_ipi_disable(void)
 {
 }
 
-static inline void riscv_clear_ipi(void)
+static inline bool riscv_ipi_have_virq_range(void)
+{
+	return false;
+}
+
+static inline void riscv_ipi_set_virq_range(int virq, int nr,
+					    bool use_for_rfence)
 {
 }
 
+static inline bool riscv_use_ipi_for_rfence(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
......
@@ -74,6 +74,7 @@ obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
 obj-$(CONFIG_RISCV_SBI)		+= sbi.o
 ifeq ($(CONFIG_RISCV_SBI), y)
+obj-$(CONFIG_SMP)		+= sbi-ipi.o
 obj-$(CONFIG_SMP)		+= cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu-hotplug.o
......
@@ -13,7 +13,7 @@
 #include <asm/irq.h>
 #include <asm/cpu_ops.h>
 #include <asm/numa.h>
-#include <asm/sbi.h>
+#include <asm/smp.h>
 
 bool cpu_has_hotplug(unsigned int cpu)
 {
@@ -43,6 +43,7 @@ int __cpu_disable(void)
 	remove_cpu_topology(cpu);
 	numa_remove_cpu(cpu);
 	set_cpu_online(cpu, false);
+	riscv_ipi_disable();
 	irq_migrate_all_off_this_cpu();
 
 	return ret;
......
@@ -7,8 +7,26 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
 #include <linux/seq_file.h>
-#include <asm/smp.h>
+#include <asm/sbi.h>
+
+static struct fwnode_handle *(*__get_intc_node)(void);
+
+void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void))
+{
+	__get_intc_node = fn;
+}
+
+struct fwnode_handle *riscv_get_intc_hwnode(void)
+{
+	if (__get_intc_node)
+		return __get_intc_node();
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
@@ -21,4 +39,5 @@ void __init init_IRQ(void)
 	irqchip_init();
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
+	sbi_ipi_init();
 }
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Multiplex several IPIs over a single HW IPI.
 *
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv: " fmt
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <asm/sbi.h>

static int sbi_ipi_virq;

static void sbi_ipi_handle(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	csr_clear(CSR_IP, IE_SIE);
	ipi_mux_process();

	chained_irq_exit(chip, desc);
}

static int sbi_ipi_starting_cpu(unsigned int cpu)
{
	enable_percpu_irq(sbi_ipi_virq, irq_get_trigger_type(sbi_ipi_virq));
	return 0;
}

void __init sbi_ipi_init(void)
{
	int virq;
	struct irq_domain *domain;

	if (riscv_ipi_have_virq_range())
		return;

	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("unable to find INTC IRQ domain\n");
		return;
	}

	sbi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT);
	if (!sbi_ipi_virq) {
		pr_err("unable to create INTC IRQ mapping\n");
		return;
	}

	virq = ipi_mux_create(BITS_PER_BYTE, sbi_send_ipi);
	if (virq <= 0) {
		pr_err("unable to create muxed IPIs\n");
		irq_dispose_mapping(sbi_ipi_virq);
		return;
	}

	irq_set_chained_handler(sbi_ipi_virq, sbi_ipi_handle);

	/*
	 * Don't disable IPI when CPU goes offline because
	 * the masking/unmasking of virtual IPIs is done
	 * via generic IPI-Mux
	 */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
			  "irqchip/sbi-ipi:starting",
			  sbi_ipi_starting_cpu, NULL);

	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false);
	pr_info("providing IPIs using SBI IPI extension\n");
}
@@ -17,7 +17,7 @@ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
 EXPORT_SYMBOL(sbi_spec_version);
 
 static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
-static int (*__sbi_send_ipi)(const struct cpumask *cpu_mask) __ro_after_init;
+static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init;
 static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
 			   unsigned long start, unsigned long size,
 			   unsigned long arg4, unsigned long arg5) __ro_after_init;
@@ -130,17 +130,6 @@ void sbi_shutdown(void)
 }
 EXPORT_SYMBOL(sbi_shutdown);
 
-/**
- * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
- *
- * Return: None
- */
-void sbi_clear_ipi(void)
-{
-	sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
-}
-EXPORT_SYMBOL(sbi_clear_ipi);
-
 /**
  * __sbi_set_timer_v01() - Program the timer for next timer event.
  * @stime_value: The value after which next timer event should fire.
@@ -157,17 +146,12 @@ static void __sbi_set_timer_v01(uint64_t stime_value)
 #endif
 }
 
-static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
+static void __sbi_send_ipi_v01(unsigned int cpu)
 {
-	unsigned long hart_mask;
-
-	if (!cpu_mask || cpumask_empty(cpu_mask))
-		cpu_mask = cpu_online_mask;
-
-	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
+	unsigned long hart_mask =
+		__sbi_v01_cpumask_to_hartmask(cpumask_of(cpu));
 
 	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
 		  0, 0, 0, 0, 0);
-
-	return 0;
 }
@@ -216,12 +200,10 @@ static void __sbi_set_timer_v01(uint64_t stime_value)
 		sbi_major_version(), sbi_minor_version());
 }
 
-static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
+static void __sbi_send_ipi_v01(unsigned int cpu)
 {
 	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
 		sbi_major_version(), sbi_minor_version());
-
-	return 0;
 }
@@ -248,55 +230,18 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
 #endif
 }
 
-static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
+static void __sbi_send_ipi_v02(unsigned int cpu)
 {
-	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
-	struct sbiret ret = {0};
 	int result;
+	struct sbiret ret = {0};
 
-	if (!cpu_mask || cpumask_empty(cpu_mask))
-		cpu_mask = cpu_online_mask;
-
-	for_each_cpu(cpuid, cpu_mask) {
-		hartid = cpuid_to_hartid_map(cpuid);
-		if (hmask) {
-			if (hartid + BITS_PER_LONG <= htop ||
-			    hbase + BITS_PER_LONG <= hartid) {
-				ret = sbi_ecall(SBI_EXT_IPI,
-						SBI_EXT_IPI_SEND_IPI, hmask,
-						hbase, 0, 0, 0, 0);
-				if (ret.error)
-					goto ecall_failed;
-				hmask = 0;
-			} else if (hartid < hbase) {
-				/* shift the mask to fit lower hartid */
-				hmask <<= hbase - hartid;
-				hbase = hartid;
-			}
-		}
-
-		if (!hmask) {
-			hbase = hartid;
-			htop = hartid;
-		} else if (hartid > htop) {
-			htop = hartid;
-		}
-
-		hmask |= BIT(hartid - hbase);
-	}
-
-	if (hmask) {
-		ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
-				hmask, hbase, 0, 0, 0, 0);
-		if (ret.error)
-			goto ecall_failed;
+	ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
+			1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0);
+	if (ret.error) {
+		result = sbi_err_map_linux_errno(ret.error);
+		pr_err("%s: hbase = [%lu] failed (error [%d])\n",
			__func__, cpuid_to_hartid_map(cpu), result);
 	}
-
-	return 0;
-
-ecall_failed:
-	result = sbi_err_map_linux_errno(ret.error);
-	pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
-	       __func__, hbase, hmask, result);
-	return result;
 }
@@ -410,13 +355,11 @@ void sbi_set_timer(uint64_t stime_value)
 
 /**
  * sbi_send_ipi() - Send an IPI to any hart.
- * @cpu_mask: A cpu mask containing all the target harts.
- *
- * Return: 0 on success, appropriate linux error code otherwise.
+ * @cpu: Logical id of the target CPU.
  */
-int sbi_send_ipi(const struct cpumask *cpu_mask)
+void sbi_send_ipi(unsigned int cpu)
 {
-	return __sbi_send_ipi(cpu_mask);
+	__sbi_send_ipi(cpu);
 }
 EXPORT_SYMBOL(sbi_send_ipi);
@@ -641,15 +584,6 @@ long sbi_get_mimpid(void)
 }
 EXPORT_SYMBOL_GPL(sbi_get_mimpid);
 
-static void sbi_send_cpumask_ipi(const struct cpumask *target)
-{
-	sbi_send_ipi(target);
-}
-
-static const struct riscv_ipi_ops sbi_ipi_ops = {
-	.ipi_inject = sbi_send_cpumask_ipi
-};
-
 void __init sbi_init(void)
 {
 	int ret;
@@ -696,6 +630,4 @@ void __init sbi_init(void)
 		__sbi_send_ipi	= __sbi_send_ipi_v01;
 		__sbi_rfence	= __sbi_rfence_v01;
 	}
-
-	riscv_set_ipi_ops(&sbi_ipi_ops);
 }
@@ -13,14 +13,15 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
+#include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
+#include <linux/irq.h>
 #include <linux/irq_work.h>
 
-#include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -44,11 +45,10 @@ void __init smp_setup_processor_id(void)
 	cpuid_to_hartid_map(0) = boot_cpu_hartid;
 }
 
-/* A collection of single bit ipi messages.  */
-static struct {
-	unsigned long stats[IPI_MAX] ____cacheline_aligned;
-	unsigned long bits ____cacheline_aligned;
-} ipi_data[NR_CPUS] __cacheline_aligned;
+static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev);
+static int ipi_virq_base __ro_after_init;
+static int nr_ipi __ro_after_init = IPI_MAX;
+static struct irq_desc *ipi_desc[IPI_MAX] __read_mostly;
 
 int riscv_hartid_to_cpuid(unsigned long hartid)
 {
@@ -100,48 +100,14 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 }
 #endif
 
-static const struct riscv_ipi_ops *ipi_ops __ro_after_init;
-
-void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
-{
-	ipi_ops = ops;
-}
-EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
-
-void riscv_clear_ipi(void)
-{
-	if (ipi_ops && ipi_ops->ipi_clear)
-		ipi_ops->ipi_clear();
-
-	csr_clear(CSR_IP, IE_SIE);
-}
-EXPORT_SYMBOL_GPL(riscv_clear_ipi);
-
 static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
 {
-	int cpu;
-
-	smp_mb__before_atomic();
-	for_each_cpu(cpu, mask)
-		set_bit(op, &ipi_data[cpu].bits);
-	smp_mb__after_atomic();
-
-	if (ipi_ops && ipi_ops->ipi_inject)
-		ipi_ops->ipi_inject(mask);
-	else
-		pr_warn("SMP: IPI inject method not available\n");
+	__ipi_send_mask(ipi_desc[op], mask);
 }
 
 static void send_ipi_single(int cpu, enum ipi_message_type op)
 {
-	smp_mb__before_atomic();
-	set_bit(op, &ipi_data[cpu].bits);
-	smp_mb__after_atomic();
-
-	if (ipi_ops && ipi_ops->ipi_inject)
-		ipi_ops->ipi_inject(cpumask_of(cpu));
-	else
-		pr_warn("SMP: IPI inject method not available\n");
+	__ipi_send_mask(ipi_desc[op], cpumask_of(cpu));
 }
 
 #ifdef CONFIG_IRQ_WORK
@@ -151,59 +117,98 @@ void arch_irq_work_raise(void)
 }
 #endif
 
-void handle_IPI(struct pt_regs *regs)
-{
-	unsigned int cpu = smp_processor_id();
-	unsigned long *pending_ipis = &ipi_data[cpu].bits;
-	unsigned long *stats = ipi_data[cpu].stats;
-
-	riscv_clear_ipi();
-
-	while (true) {
-		unsigned long ops;
-
-		/* Order bit clearing and data access. */
-		mb();
-
-		ops = xchg(pending_ipis, 0);
-		if (ops == 0)
-			return;
-
-		if (ops & (1 << IPI_RESCHEDULE)) {
-			stats[IPI_RESCHEDULE]++;
-			scheduler_ipi();
-		}
-
-		if (ops & (1 << IPI_CALL_FUNC)) {
-			stats[IPI_CALL_FUNC]++;
-			generic_smp_call_function_interrupt();
-		}
-
-		if (ops & (1 << IPI_CPU_STOP)) {
-			stats[IPI_CPU_STOP]++;
-			ipi_stop();
-		}
-
-		if (ops & (1 << IPI_CPU_CRASH_STOP)) {
-			ipi_cpu_crash_stop(cpu, get_irq_regs());
-		}
-
-		if (ops & (1 << IPI_IRQ_WORK)) {
-			stats[IPI_IRQ_WORK]++;
-			irq_work_run();
-		}
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-		if (ops & (1 << IPI_TIMER)) {
-			stats[IPI_TIMER]++;
-			tick_receive_broadcast();
-		}
-#endif
-		BUG_ON((ops >> IPI_MAX) != 0);
-
-		/* Order data access and bit testing. */
-		mb();
-	}
-}
+static irqreturn_t handle_IPI(int irq, void *data)
+{
+	int ipi = irq - ipi_virq_base;
+
+	switch (ipi) {
+	case IPI_RESCHEDULE:
+		scheduler_ipi();
+		break;
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;
+	case IPI_CPU_STOP:
+		ipi_stop();
+		break;
+	case IPI_CPU_CRASH_STOP:
+		ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
+		break;
+	case IPI_IRQ_WORK:
+		irq_work_run();
+		break;
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+	case IPI_TIMER:
+		tick_receive_broadcast();
+		break;
+#endif
+	default:
+		pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
+		break;
+	}
+
+	return IRQ_HANDLED;
+}
+
+void riscv_ipi_enable(void)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_virq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_virq_base + i, 0);
+}
+
+void riscv_ipi_disable(void)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_virq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_virq_base + i);
+}
+
+bool riscv_ipi_have_virq_range(void)
+{
+	return (ipi_virq_base) ? true : false;
+}
+
+DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
+EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);
+
+void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
+{
+	int i, err;
+
+	if (WARN_ON(ipi_virq_base))
+		return;
+
+	WARN_ON(nr < IPI_MAX);
+	nr_ipi = min(nr, IPI_MAX);
+	ipi_virq_base = virq;
+
+	/* Request IPIs */
+	for (i = 0; i < nr_ipi; i++) {
+		err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
+					 "IPI", &ipi_dummy_dev);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
+		irq_set_status_flags(ipi_virq_base + i, IRQ_HIDDEN);
+	}
+
+	/* Enabled IPIs for boot CPU immediately */
+	riscv_ipi_enable();
+
+	/* Update RFENCE static key */
+	if (use_for_rfence)
+		static_branch_enable(&riscv_ipi_for_rfence);
+	else
+		static_branch_disable(&riscv_ipi_for_rfence);
+}
 
 static const char * const ipi_names[] = {
@@ -223,7 +228,7 @@ void show_ipi_stats(struct seq_file *p, int prec)
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
 		seq_printf(p, " %s\n", ipi_names[i]);
 	}
 }
......
@@ -30,7 +30,6 @@
 #include <asm/numa.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
-#include <asm/sbi.h>
 #include <asm/smp.h>
 
 #include "head.h"
@@ -158,12 +157,12 @@ asmlinkage __visible void smp_callin(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int curr_cpuid = smp_processor_id();
 
-	riscv_clear_ipi();
-
 	/* All kernel threads share the same mm context.  */
 	mmgrab(mm);
 	current->active_mm = mm;
 
+	riscv_ipi_enable();
+
 	store_cpu_topology(curr_cpuid);
 	notify_cpu_starting(curr_cpuid);
 	numa_add_cpu(curr_cpuid);
......
@@ -19,7 +19,7 @@ void flush_icache_all(void)
 {
 	local_flush_icache_all();
 
-	if (IS_ENABLED(CONFIG_RISCV_SBI))
+	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
 		sbi_remote_fence_i(NULL);
 	else
 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
@@ -67,7 +67,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
-	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
+		   !riscv_use_ipi_for_rfence()) {
 		sbi_remote_fence_i(&others);
 	} else {
 		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
......
@@ -23,14 +23,62 @@ static inline void local_flush_tlb_page_asid(unsigned long addr,
 		: "memory");
 }
 
+static inline void local_flush_tlb_range(unsigned long start,
+		unsigned long size, unsigned long stride)
+{
+	if (size <= stride)
+		local_flush_tlb_page(start);
+	else
+		local_flush_tlb_all();
+}
+
+static inline void local_flush_tlb_range_asid(unsigned long start,
+		unsigned long size, unsigned long stride, unsigned long asid)
+{
+	if (size <= stride)
+		local_flush_tlb_page_asid(start, asid);
+	else
+		local_flush_tlb_all_asid(asid);
+}
+
+static void __ipi_flush_tlb_all(void *info)
+{
+	local_flush_tlb_all();
+}
+
 void flush_tlb_all(void)
 {
-	sbi_remote_sfence_vma(NULL, 0, -1);
+	if (riscv_use_ipi_for_rfence())
+		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
+	else
+		sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+struct flush_tlb_range_data {
+	unsigned long asid;
+	unsigned long start;
+	unsigned long size;
+	unsigned long stride;
+};
+
+static void __ipi_flush_tlb_range_asid(void *info)
+{
+	struct flush_tlb_range_data *d = info;
+
+	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
+}
+
+static void __ipi_flush_tlb_range(void *info)
+{
+	struct flush_tlb_range_data *d = info;
+
+	local_flush_tlb_range(d->start, d->size, d->stride);
 }
 
-static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
-				  unsigned long size, unsigned long stride)
+static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
+			      unsigned long size, unsigned long stride)
 {
+	struct flush_tlb_range_data ftd;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -45,19 +93,34 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
 		if (broadcast) {
-			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
-		} else if (size <= stride) {
-			local_flush_tlb_page_asid(start, asid);
+			if (riscv_use_ipi_for_rfence()) {
+				ftd.asid = asid;
+				ftd.start = start;
+				ftd.size = size;
+				ftd.stride = stride;
+				on_each_cpu_mask(cmask,
+						 __ipi_flush_tlb_range_asid,
+						 &ftd, 1);
+			} else
+				sbi_remote_sfence_vma_asid(cmask,
+							   start, size, asid);
 		} else {
-			local_flush_tlb_all_asid(asid);
+			local_flush_tlb_range_asid(start, size, stride, asid);
 		}
 	} else {
 		if (broadcast) {
-			sbi_remote_sfence_vma(cmask, start, size);
-		} else if (size <= stride) {
-			local_flush_tlb_page(start);
+			if (riscv_use_ipi_for_rfence()) {
+				ftd.asid = 0;
+				ftd.start = start;
+				ftd.size = size;
+				ftd.stride = stride;
+				on_each_cpu_mask(cmask,
						 __ipi_flush_tlb_range,
+						 &ftd, 1);
+			} else
+				sbi_remote_sfence_vma(cmask, start, size);
 		} else {
-			local_flush_tlb_all();
+			local_flush_tlb_range(start, size, stride);
 		}
 	}
@@ -66,23 +129,23 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
+	__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	__sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			unsigned long end)
 {
-	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
+	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
 }
 #endif
@@ -17,6 +17,9 @@
 #include <linux/sched_clock.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
 #include <linux/of_irq.h>
 #include <linux/smp.h>
 #include <linux/timex.h>
@@ -31,6 +34,7 @@
 /* CLINT manages IPI and Timer for RISC-V M-mode  */
 static u32 __iomem *clint_ipi_base;
+static unsigned int clint_ipi_irq;
 static u64 __iomem *clint_timer_cmp;
 static u64 __iomem *clint_timer_val;
 static unsigned long clint_timer_freq;
@@ -41,12 +45,10 @@ u64 __iomem *clint_time_val;
 EXPORT_SYMBOL(clint_time_val);
 #endif
 
-static void clint_send_ipi(const struct cpumask *target)
+#ifdef CONFIG_SMP
+static void clint_send_ipi(unsigned int cpu)
 {
-	unsigned int cpu;
-
-	for_each_cpu(cpu, target)
-		writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
+	writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
 }
 
 static void clint_clear_ipi(void)
@@ -54,10 +56,18 @@ static void clint_clear_ipi(void)
 	writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
 }
 
-static struct riscv_ipi_ops clint_ipi_ops = {
-	.ipi_inject = clint_send_ipi,
-	.ipi_clear = clint_clear_ipi,
-};
+static void clint_ipi_interrupt(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(chip, desc);
+	clint_clear_ipi();
+	ipi_mux_process();
+	chained_irq_exit(chip, desc);
+}
+#endif
 
 #ifdef CONFIG_64BIT
 #define clint_get_cycles()	readq_relaxed(clint_timer_val)
@@ -125,12 +135,19 @@ static int clint_timer_starting_cpu(unsigned int cpu)
 	enable_percpu_irq(clint_timer_irq,
 			  irq_get_trigger_type(clint_timer_irq));
+	enable_percpu_irq(clint_ipi_irq,
+			  irq_get_trigger_type(clint_ipi_irq));
 	return 0;
 }
 
 static int clint_timer_dying_cpu(unsigned int cpu)
 {
 	disable_percpu_irq(clint_timer_irq);
+	/*
+	 * Don't disable IPI when CPU goes offline because
+	 * the masking/unmasking of virtual IPIs is done
+	 * via generic IPI-Mux
+	 */
 	return 0;
 }
@@ -170,6 +187,12 @@ static int __init clint_timer_init_dt(struct device_node *np)
 			return -ENODEV;
 		}
 
+		/* Find parent irq domain and map ipi irq */
+		if (!clint_ipi_irq &&
+		    oirq.args[0] == RV_IRQ_SOFT &&
+		    irq_find_host(oirq.np))
+			clint_ipi_irq = irq_of_parse_and_map(np, i);
+
 		/* Find parent irq domain and map timer irq */
 		if (!clint_timer_irq &&
 		    oirq.args[0] == RV_IRQ_TIMER &&
@@ -177,9 +200,9 @@ static int __init clint_timer_init_dt(struct device_node *np)
 			clint_timer_irq = irq_of_parse_and_map(np, i);
 	}
 
-	/* If CLINT timer irq not found then fail */
-	if (!clint_timer_irq) {
-		pr_err("%pOFP: timer irq not found\n", np);
+	/* If CLINT ipi or timer irq not found then fail */
+	if (!clint_ipi_irq || !clint_timer_irq) {
+		pr_err("%pOFP: ipi/timer irq not found\n", np);
 		return -ENODEV;
 	}
@@ -219,6 +242,19 @@ static int __init clint_timer_init_dt(struct device_node *np)
 		goto fail_iounmap;
 	}
 
+#ifdef CONFIG_SMP
+	rc = ipi_mux_create(BITS_PER_BYTE, clint_send_ipi);
+	if (rc <= 0) {
+		pr_err("unable to create muxed IPIs\n");
+		rc = (rc < 0) ? rc : -ENODEV;
+		goto fail_free_irq;
+	}
+
+	irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
+	riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
+	clint_clear_ipi();
+#endif
+
 	rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
 				"clockevents/clint/timer:starting",
 				clint_timer_starting_cpu,
@@ -228,13 +264,10 @@ static int __init clint_timer_init_dt(struct device_node *np)
 		goto fail_free_irq;
 	}
 
-	riscv_set_ipi_ops(&clint_ipi_ops);
-	clint_clear_ipi();
-
 	return 0;
 
 fail_free_irq:
-	free_irq(clint_timer_irq, &clint_clock_event);
+	free_percpu_irq(clint_timer_irq, &clint_clock_event);
 fail_iounmap:
 	iounmap(base);
 	return rc;
......
@@ -537,6 +537,7 @@ config TI_PRUSS_INTC
 config RISCV_INTC
 	bool
 	depends on RISCV
+	select IRQ_DOMAIN_HIERARCHY
 
 config SIFIVE_PLIC
 	bool
......
@@ -26,20 +26,7 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
 	if (unlikely(cause >= BITS_PER_LONG))
 		panic("unexpected interrupt cause");
 
-	switch (cause) {
-#ifdef CONFIG_SMP
-	case RV_IRQ_SOFT:
-		/*
-		 * We only use software interrupts to pass IPIs, so if a
-		 * non-SMP system gets one, then we don't know what to do.
-		 */
-		handle_IPI(regs);
-		break;
-#endif
-	default:
-		generic_handle_domain_irq(intc_domain, cause);
-		break;
-	}
+	generic_handle_domain_irq(intc_domain, cause);
 }
 
 /*
@@ -59,22 +46,27 @@ static void riscv_intc_irq_unmask(struct irq_data *d)
 	csr_set(CSR_IE, BIT(d->hwirq));
 }
 
-static int riscv_intc_cpu_starting(unsigned int cpu)
-{
-	csr_set(CSR_IE, BIT(RV_IRQ_SOFT));
-	return 0;
-}
-
-static int riscv_intc_cpu_dying(unsigned int cpu)
+static void riscv_intc_irq_eoi(struct irq_data *d)
 {
-	csr_clear(CSR_IE, BIT(RV_IRQ_SOFT));
-	return 0;
+	/*
+	 * The RISC-V INTC driver uses handle_percpu_devid_irq() flow
+	 * for the per-HART local interrupts and child irqchip drivers
+	 * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement
+	 * chained handlers for the per-HART local interrupts.
+	 *
+	 * In the absence of irq_eoi(), the chained_irq_enter() and
+	 * chained_irq_exit() functions (used by child irqchip drivers)
+	 * will do unnecessary mask/unmask of per-HART local interrupts
+	 * at the time of handling interrupts. To avoid this, we provide
+	 * an empty irq_eoi() callback for RISC-V INTC irqchip.
+	 */
 }
 
 static struct irq_chip riscv_intc_chip = {
 	.name = "RISC-V INTC",
 	.irq_mask = riscv_intc_irq_mask,
 	.irq_unmask = riscv_intc_irq_unmask,
+	.irq_eoi = riscv_intc_irq_eoi,
 };
 
 static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
@@ -87,11 +79,39 @@ static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }
 
+static int riscv_intc_domain_alloc(struct irq_domain *domain,
+				   unsigned int virq, unsigned int nr_irqs,
+				   void *arg)
+{
+	int i, ret;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	struct irq_fwspec *fwspec = arg;
+
+	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nr_irqs; i++) {
+		ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static const struct irq_domain_ops riscv_intc_domain_ops = {
 	.map	= riscv_intc_domain_map,
 	.xlate	= irq_domain_xlate_onecell,
+	.alloc	= riscv_intc_domain_alloc
 };
 
+static struct fwnode_handle *riscv_intc_hwnode(void)
+{
+	return intc_domain->fwnode;
+}
+
 static int __init riscv_intc_init(struct device_node *node,
 				  struct device_node *parent)
 {
@@ -126,10 +146,7 @@ static int __init riscv_intc_init(struct device_node *node,
 		return rc;
 	}
 
-	cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_STARTING,
-			  "irqchip/riscv/intc:starting",
-			  riscv_intc_cpu_starting,
-			  riscv_intc_cpu_dying);
+	riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
 
 	pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
......