Commit c5a2db6a authored by Juerg Haefliger, committed by Stefan Bader

Revert "UBUNTU: SAUCE: rfi-flush: Implement congruence-first fallback flush"

This reverts commit 2d94edf3.

CVE-2017-5754

BugLink: http://bugs.launchpad.net/bugs/1756121

The functionality of this commit is provided by the following upstream
patch series:
  * powerpc/64s: Allow control of RFI flush via debugfs
  * powerpc/64s: Wire up cpu_show_meltdown()
  * powerpc/powernv: Check device-tree for RFI flush settings
  * powerpc/pseries: Query hypervisor for RFI flush settings
  * powerpc/64s: Support disabling RFI flush with no_rfi_flush and nopti
  * powerpc/64s: Add support for RFI flush of L1-D cache
  * powerpc/64s: Convert slb_miss_common to use RFI_TO_USER/KERNEL
  * powerpc/64: Convert the syscall exit path to use RFI_TO_USER/KERNEL
  * powerpc/64: Convert fast_exception_return to use RFI_TO_USER/KERNEL
  * powerpc/64s: Simple RFI macro conversions
  * powerpc/64: Add macros for annotating the destination of rfid/hrfid
  * powerpc/pseries: Add H_GET_CPU_CHARACTERISTICS flags & wrapper
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent c0f341c5
...@@ -209,8 +209,8 @@ struct paca_struct { ...@@ -209,8 +209,8 @@ struct paca_struct {
* other paca data leaking into the L1d * other paca data leaking into the L1d
*/ */
u64 exrfi[13] __aligned(0x80); u64 exrfi[13] __aligned(0x80);
u64 l1d_flush_congruence; /* Number of consecutive 128 byte lines that must be loaded */
u64 l1d_flush_sets; u64 l1d_flush_lines;
#endif #endif
}; };
......
...@@ -245,8 +245,7 @@ int main(void) ...@@ -245,8 +245,7 @@ int main(void)
DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
OFFSET(PACA_EXRFI, paca_struct, exrfi); OFFSET(PACA_EXRFI, paca_struct, exrfi);
OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); OFFSET(PACA_L1D_FLUSH_LINES, paca_struct, l1d_flush_lines);
OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
#endif #endif
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
......
...@@ -727,36 +727,29 @@ rfi_flush_fallback: ...@@ -727,36 +727,29 @@ rfi_flush_fallback:
std r9,PACA_EXRFI+EX_R9(r13) std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13) std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13) std r11,PACA_EXRFI+EX_R11(r13)
std r12,PACA_EXRFI+EX_R12(r13)
std r8,PACA_EXRFI+EX_R13(r13)
mfctr r9 mfctr r9
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SETS(r13) ld r11,PACA_L1D_FLUSH_LINES(r13)
ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) srdi r11,r11,2 /* Unrolled x4 */
mtctr r11
DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
/* XXX: Should an instruction synchronizing operation be done here? */
1:
/* /*
* The load adresses are at staggered offsets within cachelines, * The load adresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not * which suits some pipelines better (on others it should not
* hurt. * hurt.
*/ */
addi r12,r12,8 ld r11,128*0+0(r10)
mtctr r11 ld r11,128*1+8(r10)
DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ ld r11,128*2+16(r10)
/* XXX: Should an instruction synchronizing operation be done here? */ ld r11,128*3+24(r10)
addi r10,r10,(128 * 4)
1: li r8,0
.rept 8 /* 8-way set associative */
ldx r11,r10,r8
add r8,r8,r12
.endr
addi r10,r10,128 /* 128 byte cache line */
bdnz 1b bdnz 1b
mtctr r9 mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13) ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13) ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13) ld r11,PACA_EXRFI+EX_R11(r13)
ld r12,PACA_EXRFI+EX_R12(r13)
ld r8,PACA_EXRFI+EX_R13(r13)
GET_SCRATCH0(r13); GET_SCRATCH0(r13);
rfid rfid
...@@ -767,36 +760,29 @@ hrfi_flush_fallback: ...@@ -767,36 +760,29 @@ hrfi_flush_fallback:
std r9,PACA_EXRFI+EX_R9(r13) std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13) std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13) std r11,PACA_EXRFI+EX_R11(r13)
std r12,PACA_EXRFI+EX_R12(r13)
std r8,PACA_EXRFI+EX_R13(r13)
mfctr r9 mfctr r9
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SETS(r13) ld r11,PACA_L1D_FLUSH_LINES(r13)
ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) srdi r11,r11,2 /* Unrolled x4 */
mtctr r11
DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
/* XXX: Should an instruction synchronizing operation be done here? */
1:
/* /*
* The load adresses are at staggered offsets within cachelines, * The load adresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not * which suits some pipelines better (on others it should not
* hurt. * hurt.
*/ */
addi r12,r12,8 ld r11,128*0+0(r10)
mtctr r11 ld r11,128*1+8(r10)
DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ ld r11,128*2+16(r10)
/* XXX: Should an instruction synchronizing operation be done here? */ ld r11,128*3+24(r10)
addi r10,r10,(128 * 4)
1: li r8,0
.rept 8 /* 8-way set associative */
ldx r11,r10,r8
add r8,r8,r12
.endr
addi r10,r10,128 /* 128 byte cache line */
bdnz 1b bdnz 1b
mtctr r9 mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13) ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13) ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13) ld r11,PACA_EXRFI+EX_R11(r13)
ld r12,PACA_EXRFI+EX_R12(r13)
ld r8,PACA_EXRFI+EX_R13(r13)
GET_SCRATCH0(r13); GET_SCRATCH0(r13);
hrfid hrfid
......
...@@ -899,19 +899,8 @@ void __init setup_rfi_flush(enum l1d_flush_type type, bool enable) ...@@ -899,19 +899,8 @@ void __init setup_rfi_flush(enum l1d_flush_type type, bool enable)
memset(l1d_flush_fallback_area, 0, l1d_size * 2); memset(l1d_flush_fallback_area, 0, l1d_size * 2);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
/*
* The fallback flush is currently coded for 8-way
* associativity. Different associativity is possible,
* but it will be treated as 8-way and may not evict
* the lines as effectively.
*
* 128 byte lines are mandatory.
*/
u64 c = l1d_size / 8;
paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
paca[cpu].l1d_flush_congruence = c; paca[cpu].l1d_flush_lines = l1d_size / 128;
paca[cpu].l1d_flush_sets = c / 128;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment