Commit ddce1921 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - Cache invalidation fix for early CPU boot status update (incorrect
   cacheline)

 - of_node_put() missing in the spin_table code

 - EL1/EL2 early init inconsistency when Virtualisation Host Extensions
   are present

 - RCU warning fix in the arm_pmu.c driver

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix EL1/EL2 early init inconsistencies with VHE
  drivers/perf: arm-pmu: fix RCU usage on pmu resume from low-power
  arm64: spin-table: add missing of_node_put()
  arm64: fix invalidation of wrong __early_cpu_boot_status cacheline
parents ff061624 882416c1
...@@ -588,6 +588,15 @@ set_hcr: ...@@ -588,6 +588,15 @@ set_hcr:
msr vpidr_el2, x0 msr vpidr_el2, x0
msr vmpidr_el2, x1 msr vmpidr_el2, x1
/*
* When VHE is not in use, early init of EL2 and EL1 needs to be
* done here.
* When VHE _is_ in use, EL1 will not be used in the host and
* requires no configuration, and all non-hyp-specific EL2 setup
* will be done via the _EL1 system register aliases in __cpu_setup.
*/
cbnz x2, 1f
/* sctlr_el1 */ /* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits mov x0, #0x0800 // Set/clear RES{1,0} bits
CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
...@@ -597,6 +606,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems ...@@ -597,6 +606,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
/* Coprocessor traps. */ /* Coprocessor traps. */
mov x0, #0x33ff mov x0, #0x33ff
msr cptr_el2, x0 // Disable copro. traps to EL2 msr cptr_el2, x0 // Disable copro. traps to EL2
1:
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
msr hstr_el2, xzr // Disable CP15 traps to EL2 msr hstr_el2, xzr // Disable CP15 traps to EL2
...@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched) ...@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
.macro update_early_cpu_boot_status status, tmp1, tmp2 .macro update_early_cpu_boot_status status, tmp1, tmp2
mov \tmp2, #\status mov \tmp2, #\status
str_l \tmp2, __early_cpu_boot_status, \tmp1 adr_l \tmp1, __early_cpu_boot_status
str \tmp2, [\tmp1]
dmb sy dmb sy
dc ivac, \tmp1 // Invalidate potentially stale cache line dc ivac, \tmp1 // Invalidate potentially stale cache line
.endm .endm
......
...@@ -52,6 +52,7 @@ static void write_pen_release(u64 val) ...@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
static int smp_spin_table_cpu_init(unsigned int cpu) static int smp_spin_table_cpu_init(unsigned int cpu)
{ {
struct device_node *dn; struct device_node *dn;
int ret;
dn = of_get_cpu_node(cpu, NULL); dn = of_get_cpu_node(cpu, NULL);
if (!dn) if (!dn)
...@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu) ...@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
/* /*
* Determine the address from which the CPU is polling. * Determine the address from which the CPU is polling.
*/ */
if (of_property_read_u64(dn, "cpu-release-addr", ret = of_property_read_u64(dn, "cpu-release-addr",
&cpu_release_addr[cpu])) { &cpu_release_addr[cpu]);
if (ret)
pr_err("CPU %d: missing or invalid cpu-release-addr property\n", pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
cpu); cpu);
return -1; of_node_put(dn);
}
return 0; return ret;
} }
static int smp_spin_table_cpu_prepare(unsigned int cpu) static int smp_spin_table_cpu_prepare(unsigned int cpu)
......
...@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd) ...@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
break; break;
case CPU_PM_EXIT: case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED: case CPU_PM_ENTER_FAILED:
/* Restore and enable the counter */ /*
armpmu_start(event, PERF_EF_RELOAD); * Restore and enable the counter.
* armpmu_start() indirectly calls
*
* perf_event_update_userpage()
*
* that requires RCU read locking to be functional,
* wrap the call within RCU_NONIDLE to make the
* RCU subsystem aware this cpu is not idle from
* an RCU perspective for the armpmu_start() call
* duration.
*/
RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
break; break;
default: default:
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment