Commit 88b31f07 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:

 - Spectre/Meltdown safelisting for some Qualcomm KRYO cores

 - Fix RCU splat when failing to online a CPU due to a feature mismatch

 - Fix a recently introduced sparse warning in kexec()

 - Fix handling of CPU erratum 1418040 for late CPUs

 - Ensure hot-added memory falls within linear-mapped region

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: cpu_errata: Apply Erratum 845719 to KRYO2XX Silver
  arm64: proton-pack: Add KRYO2XX silver CPUs to spectre-v2 safe-list
  arm64: kpti: Add KRYO2XX gold/silver CPU cores to kpti safelist
  arm64: Add MIDR value for KRYO2XX gold/silver CPU cores
  arm64/mm: Validate hotplug range before creating linear mapping
  arm64: smp: Tell RCU about CPUs that fail to come online
  arm64: psci: Avoid printing in cpu_psci_cpu_die()
  arm64: kexec_file: Fix sparse warning
  arm64: errata: Fix handling of 1418040 with late CPU onlining
parents d3ba7afc 23c21641
@@ -268,6 +268,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 /*
  * CPU feature detected at boot time based on feature of one or more CPUs.
  * All possible conflicts for a late CPU are ignored.
+ * NOTE: this means that a late CPU with the feature will *not* cause the
+ * capability to be advertised by cpus_have_*cap()!
  */
 #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
 	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|		\
...
@@ -86,6 +86,8 @@
 #define QCOM_CPU_PART_FALKOR_V1		0x800
 #define QCOM_CPU_PART_FALKOR		0xC00
 #define QCOM_CPU_PART_KRYO		0x200
+#define QCOM_CPU_PART_KRYO_2XX_GOLD	0x800
+#define QCOM_CPU_PART_KRYO_2XX_SILVER	0x801
 #define QCOM_CPU_PART_KRYO_3XX_SILVER	0x803
 #define QCOM_CPU_PART_KRYO_4XX_GOLD	0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER	0x805
@@ -116,6 +118,8 @@
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
+#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
 #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
...
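For reference, MIDR_CPU_MODEL() packs an implementer code and part number into the architectural MIDR_EL1 layout (implementer in bits [31:24], part number in [15:4]). A standalone userspace sketch, with the macro bodies mirrored from the kernel's cputype.h conventions, computing what the new KRYO_2XX_GOLD MIDR expands to:

#include <stdio.h>
#include <stdint.h>

#define MIDR_IMPLEMENTOR_SHIFT	24
#define MIDR_ARCHITECTURE_SHIFT	16
#define MIDR_PARTNUM_SHIFT	4

/* Mirrors the kernel macro: implementer | architecture 0xf | part number. */
#define MIDR_CPU_MODEL(imp, partnum)			\
	(((imp) << MIDR_IMPLEMENTOR_SHIFT) |		\
	 (0xfU << MIDR_ARCHITECTURE_SHIFT) |		\
	 ((partnum) << MIDR_PARTNUM_SHIFT))

#define ARM_CPU_IMP_QCOM		0x51	/* 'Q' */
#define QCOM_CPU_PART_KRYO_2XX_GOLD	0x800

int main(void)
{
	/* Prints 0x510f8000 for Kryo2XX Gold. */
	printf("0x%08x\n", (uint32_t)MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM,
						    QCOM_CPU_PART_KRYO_2XX_GOLD));
	return 0;
}

Note that the hunk gives Kryo2XX Gold the same part number (0x800) as Falkor V1; the two are presumably disambiguated by the variant/revision fields where it matters.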
@@ -299,6 +299,8 @@ static const struct midr_range erratum_845719_list[] = {
 	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 	/* Brahma-B53 r0p[0] */
 	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+	/* Kryo2XX Silver rAp4 */
+	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
 	{},
 };
 #endif
...
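Unlike the MIDR_ALL_VERSIONS() entries elsewhere in this pull, this entry matches exactly one stepping, "rAp4", i.e. variant 0xa and revision 0x4. A minimal sketch of that kind of match against the MIDR variant ([23:20]) and revision ([3:0]) fields; the example MIDR value is fabricated:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIDR_VARIANT_SHIFT	20
#define MIDR_VARIANT_MASK	(0xfU << MIDR_VARIANT_SHIFT)
#define MIDR_REVISION_MASK	0xfU

/* True only for the exact variant/revision pair, in the spirit of MIDR_REV(). */
static bool midr_is_rev(uint32_t midr, unsigned int var, unsigned int rev)
{
	return ((midr & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT) == var &&
	       (midr & MIDR_REVISION_MASK) == rev;
}

int main(void)
{
	uint32_t midr = 0x51af8014;	/* fabricated Kryo2XX Silver rAp4 */

	printf("affected: %d\n", midr_is_rev(midr, 0xa, 0x4));	/* 1 */
	printf("affected: %d\n", midr_is_rev(midr, 0xa, 0x5));	/* 0 */
	return 0;
}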
@@ -1337,6 +1337,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
 		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
 		{ /* sentinel */ }
...
@@ -127,7 +127,7 @@ static void *image_load(struct kimage *image,
 			      kernel_segment->mem, kbuf.bufsz,
 			      kernel_segment->memsz);

-	return 0;
+	return NULL;
 }

 #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
...
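The sparse warning here is the classic "Using plain integer as NULL pointer": image_load() returns void *, so its error path must return NULL rather than 0. A minimal reproducer (hypothetical functions, checked with "sparse demo.c"):

#include <stddef.h>

static void *bad_load(void)
{
	return 0;	/* sparse: Using plain integer as NULL pointer */
}

static void *good_load(void)
{
	return NULL;	/* the form the fix switches to */
}

int main(void)
{
	/* Both compile to the same code; only sparse complains about one. */
	return (bad_load() == NULL && good_load() == NULL) ? 0 : 1;
}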
@@ -522,14 +522,13 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	bool prev32, next32;
 	u64 val;

-	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
-	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
 		return;

 	prev32 = is_compat_thread(task_thread_info(prev));
 	next32 = is_compat_thread(task_thread_info(next));

-	if (prev32 == next32)
+	if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
 		return;

 	val = read_sysreg(cntkctl_el1);
...
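The substantive change here is swapping cpus_have_const_cap() for this_cpu_has_cap(). The former is a system-wide answer frozen once the boot CPUs have been enumerated, and, per the NOTE added to the WEAK_LOCAL_CPU_FEATURE comment above, a late-onlined CPU with the erratum would never flip it. A toy userspace model of that miss; the helpers below only imitate the kernel's behaviour, they are not its API:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool cpu_affected[NR_CPUS];	/* per-CPU erratum state */
static bool system_cap;			/* snapshot frozen at boot */

/* Like cpus_have_const_cap(): decided once, from the boot CPUs only. */
static void freeze_boot_caps(int nr_boot_cpus)
{
	for (int i = 0; i < nr_boot_cpus; i++)
		system_cap |= cpu_affected[i];
}

/* Like this_cpu_has_cap(): asks the CPU we are running on right now. */
static bool this_cpu_has_cap(int cpu)
{
	return cpu_affected[cpu];
}

int main(void)
{
	freeze_boot_caps(2);		/* CPUs 0-1 up at boot, unaffected */
	cpu_affected[3] = true;		/* CPU3 onlined late, affected */

	printf("frozen check: %d\n", system_cap);		/* 0: misses CPU3 */
	printf("per-cpu check: %d\n", this_cpu_has_cap(3));	/* 1 */
	return 0;
}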
@@ -118,6 +118,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
 		{ /* sentinel */ }
...
@@ -66,7 +66,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu)

 static void cpu_psci_cpu_die(unsigned int cpu)
 {
-	int ret;
 	/*
 	 * There are no known implementations of PSCI actually using the
 	 * power state field, pass a sensible default for now.
@@ -74,9 +73,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
 	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
 		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;

-	ret = psci_ops.cpu_off(state);
-
-	pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
+	psci_ops.cpu_off(state);
 }

 static int cpu_psci_cpu_kill(unsigned int cpu)
...
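Two things are going on above: the deleted pr_crit() ran on the dying CPU, which by this point is no longer tracked by RCU (see the cpu_die_early() hunk below), so printing there is not safe; and since PSCI CPU_OFF does not return on success, the message was only ever reachable on failure anyway. For reference, the state argument encodes a plain power-down in the PSCI v0.2 state-type field. A worked computation, assuming the constants from the uapi PSCI header:

#include <stdint.h>
#include <stdio.h>

#define PSCI_0_2_POWER_STATE_TYPE_SHIFT		16
#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1

int main(void)
{
	uint32_t state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
			 PSCI_0_2_POWER_STATE_TYPE_SHIFT;

	/* Prints 0x00010000: power down, no platform-specific extras. */
	printf("0x%08x\n", state);
	return 0;
}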
@@ -413,6 +413,7 @@ void cpu_die_early(void)

 	/* Mark this CPU absent */
 	set_cpu_present(cpu, 0);
+	rcu_report_dead(cpu);

 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
 		update_cpu_boot_status(CPU_KILL_ME);
...
@@ -1444,11 +1444,28 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
 }

+static bool inside_linear_region(u64 start, u64 size)
+{
+	/*
+	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
+	 * accommodating both its ends but excluding PAGE_END. Max physical
+	 * range which can be mapped inside this linear mapping range, must
+	 * also be derived from its end points.
+	 */
+	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
+	       (start + size - 1) <= __pa(PAGE_END - 1);
+}
+
 int arch_add_memory(int nid, u64 start, u64 size,
 		    struct mhp_params *params)
 {
 	int ret, flags = 0;

+	if (!inside_linear_region(start, size)) {
+		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
+		return -EINVAL;
+	}
+
 	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
...
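A detail worth noting in inside_linear_region(): it compares start + size - 1 against an inclusive upper bound, which stays correct even when a region ends at the very top of the address space, where an exclusive start + size would wrap to zero. A small demonstration with made-up bounds:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Inclusive-end range check, shaped like inside_linear_region(). */
static bool in_range(uint64_t start, uint64_t size,
		     uint64_t lo, uint64_t hi_inclusive)
{
	return start >= lo && (start + size - 1) <= hi_inclusive;
}

int main(void)
{
	uint64_t lo = 0x80000000ULL;		/* made-up lower bound */
	uint64_t hi = UINT64_MAX;		/* inclusive upper bound */
	uint64_t start = UINT64_MAX - 0xfffULL;
	uint64_t size = 0x1000ULL;		/* region ends at the top */

	printf("inclusive check: %d\n", in_range(start, size, lo, hi));	/* 1 */
	printf("start + size wraps to 0x%llx\n",
	       (unsigned long long)(start + size));			/* 0x0 */
	return 0;
}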
@@ -4077,7 +4077,6 @@ void rcu_cpu_starting(unsigned int cpu)
 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }

-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The outgoing function has no further need of RCU, so remove it from
  * the rcu_node tree's ->qsmaskinitnext bit masks.
@@ -4117,6 +4116,7 @@ void rcu_report_dead(unsigned int cpu)
 	rdp->cpu_started = false;
 }

+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The outgoing CPU has just passed through the dying-idle state, and we
  * are being invoked from the CPU that was IPIed to continue the offline
...