Commit 5634d9c2 authored by Palmer Dabbelt

Merge patch series "riscv: CPU operations cleanup"

Samuel Holland <samuel.holland@sifive.com> says:

This series cleans up some duplicated and dead code around the RISC-V
CPU operations, that was copied from arm64 but is not needed here. The
result is a bit of memory savings and removal of a few SBI calls during
boot, with no functional change.

* b4-shazam-merge:
  riscv: Use the same CPU operations for all CPUs
  riscv: Remove unused members from struct cpu_operations
  riscv: Deduplicate code in setup_smp()

Link: https://lore.kernel.org/r/20231121234736.3489608-1-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents b7b4e4d7 62ff2622
...@@ -13,33 +13,23 @@ ...@@ -13,33 +13,23 @@
/** /**
* struct cpu_operations - Callback operations for hotplugging CPUs. * struct cpu_operations - Callback operations for hotplugging CPUs.
* *
* @name: Name of the boot protocol.
* @cpu_prepare: Early one-time preparation step for a cpu. If there
* is a mechanism for doing so, tests whether it is
* possible to boot the given HART.
* @cpu_start: Boots a cpu into the kernel. * @cpu_start: Boots a cpu into the kernel.
* @cpu_disable: Prepares a cpu to die. May fail for some
* mechanism-specific reason, which will cause the hot
* unplug to be aborted. Called from the cpu to be killed.
* @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from * @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from
* the cpu being stopped. * the cpu being stopped.
* @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another * @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another
* cpu. * cpu.
*/ */
struct cpu_operations { struct cpu_operations {
const char *name;
int (*cpu_prepare)(unsigned int cpu);
int (*cpu_start)(unsigned int cpu, int (*cpu_start)(unsigned int cpu,
struct task_struct *tidle); struct task_struct *tidle);
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int (*cpu_disable)(unsigned int cpu);
void (*cpu_stop)(void); void (*cpu_stop)(void);
int (*cpu_is_stopped)(unsigned int cpu); int (*cpu_is_stopped)(unsigned int cpu);
#endif #endif
}; };
extern const struct cpu_operations cpu_ops_spinwait; extern const struct cpu_operations cpu_ops_spinwait;
extern const struct cpu_operations *cpu_ops[NR_CPUS]; extern const struct cpu_operations *cpu_ops;
void __init cpu_set_ops(int cpu); void __init cpu_set_ops(void);
#endif /* ifndef __ASM_CPU_OPS_H */ #endif /* ifndef __ASM_CPU_OPS_H */
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
bool cpu_has_hotplug(unsigned int cpu) bool cpu_has_hotplug(unsigned int cpu)
{ {
if (cpu_ops[cpu]->cpu_stop) if (cpu_ops->cpu_stop)
return true; return true;
return false; return false;
...@@ -29,25 +29,18 @@ bool cpu_has_hotplug(unsigned int cpu) ...@@ -29,25 +29,18 @@ bool cpu_has_hotplug(unsigned int cpu)
*/ */
int __cpu_disable(void) int __cpu_disable(void)
{ {
int ret = 0;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop) if (!cpu_ops->cpu_stop)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (cpu_ops[cpu]->cpu_disable)
ret = cpu_ops[cpu]->cpu_disable(cpu);
if (ret)
return ret;
remove_cpu_topology(cpu); remove_cpu_topology(cpu);
numa_remove_cpu(cpu); numa_remove_cpu(cpu);
set_cpu_online(cpu, false); set_cpu_online(cpu, false);
riscv_ipi_disable(); riscv_ipi_disable();
irq_migrate_all_off_this_cpu(); irq_migrate_all_off_this_cpu();
return ret; return 0;
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
...@@ -62,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) ...@@ -62,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
pr_notice("CPU%u: off\n", cpu); pr_notice("CPU%u: off\n", cpu);
/* Verify from the firmware if the cpu is really stopped*/ /* Verify from the firmware if the cpu is really stopped*/
if (cpu_ops[cpu]->cpu_is_stopped) if (cpu_ops->cpu_is_stopped)
ret = cpu_ops[cpu]->cpu_is_stopped(cpu); ret = cpu_ops->cpu_is_stopped(cpu);
if (ret) if (ret)
pr_warn("CPU%d may not have stopped: %d\n", cpu, ret); pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
} }
...@@ -77,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void) ...@@ -77,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void)
cpuhp_ap_report_dead(); cpuhp_ap_report_dead();
cpu_ops[smp_processor_id()]->cpu_stop(); cpu_ops->cpu_stop();
/* It should never reach here */ /* It should never reach here */
BUG(); BUG();
} }
......
...@@ -13,25 +13,21 @@ ...@@ -13,25 +13,21 @@
#include <asm/sbi.h> #include <asm/sbi.h>
#include <asm/smp.h> #include <asm/smp.h>
const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; const struct cpu_operations *cpu_ops __ro_after_init = &cpu_ops_spinwait;
extern const struct cpu_operations cpu_ops_sbi; extern const struct cpu_operations cpu_ops_sbi;
#ifndef CONFIG_RISCV_BOOT_SPINWAIT #ifndef CONFIG_RISCV_BOOT_SPINWAIT
const struct cpu_operations cpu_ops_spinwait = { const struct cpu_operations cpu_ops_spinwait = {
.name = "",
.cpu_prepare = NULL,
.cpu_start = NULL, .cpu_start = NULL,
}; };
#endif #endif
void __init cpu_set_ops(int cpuid) void __init cpu_set_ops(void)
{ {
#if IS_ENABLED(CONFIG_RISCV_SBI) #if IS_ENABLED(CONFIG_RISCV_SBI)
if (sbi_probe_extension(SBI_EXT_HSM)) { if (sbi_probe_extension(SBI_EXT_HSM)) {
if (!cpuid) pr_info("SBI HSM extension detected\n");
pr_info("SBI HSM extension detected\n"); cpu_ops = &cpu_ops_sbi;
cpu_ops[cpuid] = &cpu_ops_sbi; }
} else
#endif #endif
cpu_ops[cpuid] = &cpu_ops_spinwait;
} }
...@@ -79,23 +79,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle) ...@@ -79,23 +79,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
return sbi_hsm_hart_start(hartid, boot_addr, hsm_data); return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
} }
static int sbi_cpu_prepare(unsigned int cpuid)
{
if (!cpu_ops_sbi.cpu_start) {
pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
return -ENODEV;
}
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static int sbi_cpu_disable(unsigned int cpuid)
{
if (!cpu_ops_sbi.cpu_stop)
return -EOPNOTSUPP;
return 0;
}
static void sbi_cpu_stop(void) static void sbi_cpu_stop(void)
{ {
int ret; int ret;
...@@ -118,11 +102,8 @@ static int sbi_cpu_is_stopped(unsigned int cpuid) ...@@ -118,11 +102,8 @@ static int sbi_cpu_is_stopped(unsigned int cpuid)
#endif #endif
const struct cpu_operations cpu_ops_sbi = { const struct cpu_operations cpu_ops_sbi = {
.name = "sbi",
.cpu_prepare = sbi_cpu_prepare,
.cpu_start = sbi_cpu_start, .cpu_start = sbi_cpu_start,
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = sbi_cpu_disable,
.cpu_stop = sbi_cpu_stop, .cpu_stop = sbi_cpu_stop,
.cpu_is_stopped = sbi_cpu_is_stopped, .cpu_is_stopped = sbi_cpu_is_stopped,
#endif #endif
......
...@@ -39,15 +39,6 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid, ...@@ -39,15 +39,6 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle); WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
} }
static int spinwait_cpu_prepare(unsigned int cpuid)
{
if (!cpu_ops_spinwait.cpu_start) {
pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
return -ENODEV;
}
return 0;
}
static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle) static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{ {
/* /*
...@@ -64,7 +55,5 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle) ...@@ -64,7 +55,5 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
} }
const struct cpu_operations cpu_ops_spinwait = { const struct cpu_operations cpu_ops_spinwait = {
.name = "spinwait",
.cpu_prepare = spinwait_cpu_prepare,
.cpu_start = spinwait_cpu_start, .cpu_start = spinwait_cpu_start,
}; };
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/csr.h> #include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h> #include <asm/hwcap.h>
#include <asm/image.h> #include <asm/image.h>
#include <asm/scs.h> #include <asm/scs.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h> #include <asm/early_ioremap.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/setup.h> #include <asm/setup.h>
......
...@@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) ...@@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
if (cpu_has_hotplug(cpu)) if (cpu_has_hotplug(cpu))
cpu_ops[cpu]->cpu_stop(); cpu_ops->cpu_stop();
#endif #endif
for(;;) for(;;)
......
...@@ -49,7 +49,6 @@ void __init smp_prepare_boot_cpu(void) ...@@ -49,7 +49,6 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
int cpuid; int cpuid;
int ret;
unsigned int curr_cpuid; unsigned int curr_cpuid;
init_cpu_topology(); init_cpu_topology();
...@@ -66,11 +65,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -66,11 +65,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for_each_possible_cpu(cpuid) { for_each_possible_cpu(cpuid) {
if (cpuid == curr_cpuid) if (cpuid == curr_cpuid)
continue; continue;
if (cpu_ops[cpuid]->cpu_prepare) {
ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
if (ret)
continue;
}
set_cpu_present(cpuid, true); set_cpu_present(cpuid, true);
numa_store_cpu_info(cpuid); numa_store_cpu_info(cpuid);
} }
...@@ -125,18 +119,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un ...@@ -125,18 +119,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
static void __init acpi_parse_and_init_cpus(void) static void __init acpi_parse_and_init_cpus(void)
{ {
int cpuid;
cpu_set_ops(0);
acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0); acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
cpu_set_ops(cpuid);
set_cpu_possible(cpuid, true);
}
}
} }
#else #else
#define acpi_parse_and_init_cpus(...) do { } while (0) #define acpi_parse_and_init_cpus(...) do { } while (0)
...@@ -150,8 +133,6 @@ static void __init of_parse_and_init_cpus(void) ...@@ -150,8 +133,6 @@ static void __init of_parse_and_init_cpus(void)
int cpuid = 1; int cpuid = 1;
int rc; int rc;
cpu_set_ops(0);
for_each_of_cpu_node(dn) { for_each_of_cpu_node(dn) {
rc = riscv_early_of_processor_hartid(dn, &hart); rc = riscv_early_of_processor_hartid(dn, &hart);
if (rc < 0) if (rc < 0)
...@@ -179,27 +160,28 @@ static void __init of_parse_and_init_cpus(void) ...@@ -179,27 +160,28 @@ static void __init of_parse_and_init_cpus(void)
if (cpuid > nr_cpu_ids) if (cpuid > nr_cpu_ids)
pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n", pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
cpuid, nr_cpu_ids); cpuid, nr_cpu_ids);
for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
cpu_set_ops(cpuid);
set_cpu_possible(cpuid, true);
}
}
} }
void __init setup_smp(void) void __init setup_smp(void)
{ {
int cpuid;
cpu_set_ops();
if (acpi_disabled) if (acpi_disabled)
of_parse_and_init_cpus(); of_parse_and_init_cpus();
else else
acpi_parse_and_init_cpus(); acpi_parse_and_init_cpus();
for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
set_cpu_possible(cpuid, true);
} }
static int start_secondary_cpu(int cpu, struct task_struct *tidle) static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{ {
if (cpu_ops[cpu]->cpu_start) if (cpu_ops->cpu_start)
return cpu_ops[cpu]->cpu_start(cpu, tidle); return cpu_ops->cpu_start(cpu, tidle);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment