Commit 3ffa9d9e authored by Michael Ellerman

powerpc/64s: Fix Power9 DD2.0 workarounds by adding DD2.1 feature

Recently we added a CPU feature for Power9 DD2.0, to capture the fact
that some workarounds are required only on Power9 DD1 and DD2.0 but
not DD2.1 or later.

Then in commit 9d2f510a ("powerpc/64s/idle: avoid POWER9 DD1 and
DD2.0 ERAT workaround on DD2.1") and commit e3646330
"powerpc/64s/idle: avoid POWER9 DD1 and DD2.0 PMU workaround on
DD2.1") we changed CPU_FTR_SECTIONs to check for DD1 or DD20, eg:

  BEGIN_FTR_SECTION
          PPC_INVALIDATE_ERAT
  END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1 | CPU_FTR_POWER9_DD20)

Unfortunately, although this reads as "if DD1 or DD2.0 is set", the
or is a bitwise OR and actually generates a mask with both bits set.
The code that does the feature patching then checks whether the CPU
features masked with that mask are equal to the mask.

So the end result is that we're checking for DD1 and DD20 both being
set, which never happens because a chip is only ever one revision.
Yes, the API is terrible.
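
To make that concrete, here is a minimal C sketch of the IFSET check
(illustrative only, not the kernel's actual fixup code; the function
name is made up):

  #include <stdbool.h>
  #include <stdint.h>

  /* A FTR_SECTION_IFSET(mask) section is kept only if ALL bits of
   * mask are set in the CPU features, not if any one of them is. */
  static bool ftr_section_ifset(uint64_t cpu_features, uint64_t mask)
  {
          /* (features & mask) == mask => every bit must be set, so
           * IFSET(CPU_FTR_POWER9_DD1 | CPU_FTR_POWER9_DD20) really
           * means "DD1 AND DD20", not "DD1 OR DD20". */
          return (cpu_features & mask) == mask;
  }

Because the two bits are never set together, the check is always
false and the workaround is patched out on every revision.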

Removing the ERAT workaround on DD2.0 results in random SEGVs: the
system tends to boot, but things randomly die, including sometimes
dhclient, udev etc.

To fix the problem and hopefully avoid it in future, we remove the
DD2.0 CPU feature and instead add a DD2.1 (or later) feature. This
allows us to easily express that the workarounds are required if DD2.1
is not set.
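
With the inverted feature the same section becomes, eg:

  BEGIN_FTR_SECTION
          PPC_INVALIDATE_ERAT
  END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)

which applies the workaround whenever CPU_FTR_POWER9_DD2_1 is clear,
ie. on DD1 and DD2.0, as intended (see the idle_book3s.S hunks below).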

At some point we will drop the DD1 workarounds entirely and some of
this can be cleaned up.

Fixes: 9d2f510a ("powerpc/64s/idle: avoid POWER9 DD1 and DD2.0 ERAT workaround on DD2.1")
Fixes: e3646330 ("powerpc/64s/idle: avoid POWER9 DD1 and DD2.0 PMU workaround on DD2.1")
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 475b581f
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
@@ -215,7 +215,7 @@ enum {
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
-#define CPU_FTR_POWER9_DD20		LONG_ASM_CONST(0x8000000000000000)
+#define CPU_FTR_POWER9_DD2_1		LONG_ASM_CONST(0x8000000000000000)

 #ifndef __ASSEMBLY__

@@ -478,7 +478,8 @@ enum {
 	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300)
 #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
 			     (~CPU_FTR_SAO))
-#define CPU_FTRS_POWER9_DD20 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD20)
+#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
+#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -498,7 +499,7 @@ enum {
 	    CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
 	    CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
 	    CPU_FTRS_PA6T | CPU_FTR_VSX | CPU_FTRS_POWER9 | \
-	    CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD20)
+	    CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1)
 #endif
 #else
 enum {
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
@@ -551,7 +551,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pvr_mask		= 0xffffefff,
 		.pvr_value		= 0x004e0200,
 		.cpu_name		= "POWER9 (raw)",
-		.cpu_features		= CPU_FTRS_POWER9_DD20,
+		.cpu_features		= CPU_FTRS_POWER9_DD2_0,
 		.cpu_user_features	= COMMON_USER_POWER9,
 		.cpu_user_features2	= COMMON_USER2_POWER9,
 		.mmu_features		= MMU_FTRS_POWER9,
@@ -567,11 +567,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check_early	= __machine_check_early_realmode_p9,
 		.platform		= "power9",
 	},
-	{	/* Power9 */
+	{	/* Power9 DD 2.1 or later (see DD2.0 above) */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x004e0000,
 		.cpu_name		= "POWER9 (raw)",
-		.cpu_features		= CPU_FTRS_POWER9,
+		.cpu_features		= CPU_FTRS_POWER9_DD2_1,
 		.cpu_user_features	= COMMON_USER_POWER9,
 		.cpu_user_features2	= COMMON_USER2_POWER9,
 		.mmu_features		= MMU_FTRS_POWER9,
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -736,7 +736,7 @@ static __init void cpufeatures_cpu_quirks(void)
 	if ((version & 0xffffff00) == 0x004e0100)
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
 	else if ((version & 0xffffefff) == 0x004e0200)
-		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD20;
+		cur_cpu_spec->cpu_features &= ~CPU_FTR_POWER9_DD2_1;
 }

 static void __init cpufeatures_setup_finished(void)
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
@@ -357,13 +357,13 @@ power_enter_stop:
 .Lhandle_esl_ec_set:
 BEGIN_FTR_SECTION
 	/*
-	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
-	 * state-loss idle. Saving and restoring MMCR0 over idle is a
+	 * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up after
+	 * a state-loss idle. Saving and restoring MMCR0 over idle is a
 	 * workaround.
 	 */
 	mfspr	r4,SPRN_MMCR0
 	std	r4,_MMCR0(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1 | CPU_FTR_POWER9_DD20)
+END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)

 	/*
 	 * Check if the requested state is a deep idle state.
@@ -554,7 +554,7 @@ BEGIN_FTR_SECTION
 	ld	r1,PACAR1(r13)
 	ld	r4,_MMCR0(r1)
 	mtspr	SPRN_MMCR0,r4
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1 | CPU_FTR_POWER9_DD20)
+END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
 	mfspr	r4,SPRN_MMCRA
 	ori	r4,r4,(1 << (63-60))
 	mtspr	SPRN_MMCRA,r4