Commit f718d426 authored by Matt Brown, committed by Michael Ellerman

powerpc/lib/xor_vmx: Ensure no altivec code executes before enable_kernel_altivec()

The xor_vmx.c file implements the RAID5 xor operations. In each of these
functions altivec is enabled, the operation is run, and altivec is then
disabled again.

The code uses enable_kernel_altivec() around the core of the algorithm.
However, the whole file is built with -maltivec, so the compiler is within
its rights to generate altivec code anywhere in it. This has been seen at
least once in the wild:

  0:mon> di $xor_altivec_2
  c0000000000b97d0  3c4c01d9	addis   r2,r12,473
  c0000000000b97d4  3842db30	addi    r2,r2,-9424
  c0000000000b97d8  7c0802a6	mflr    r0
  c0000000000b97dc  f8010010	std     r0,16(r1)
  c0000000000b97e0  60000000	nop
  c0000000000b97e4  7c0802a6	mflr    r0
  c0000000000b97e8  faa1ffa8	std     r21,-88(r1)
  ...
  c0000000000b981c  f821ff41	stdu    r1,-192(r1)
  c0000000000b9820  7f8101ce	stvx    v28,r1,r0		<-- POP
  c0000000000b9824  38000030	li      r0,48
  c0000000000b9828  7fa101ce	stvx    v29,r1,r0
  ...
  c0000000000b984c  4bf6a06d	bl      c0000000000238b8 # enable_kernel_altivec

This patch splits the non-altivec code into xor_vmx_glue.c, which calls the
altivec functions in xor_vmx.c. By compiling xor_vmx_glue.c without
-maltivec we can guarantee that altivec instructions will not be executed
outside of the enable/disable block.

Signed-off-by: Matt Brown <matthew.brown.dev@gmail.com>
[mpe: Rework change log and include disassembly]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 48a316e3
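
In outline, the patch keeps the vector loop in the -maltivec translation
unit and moves the enable/disable bracketing into a new file built without
that flag. A condensed sketch of the resulting split for the two-buffer
case (the full diff follows):

  /* xor_vmx.c - still built with -maltivec; contains only code that is
   * reached between enable_kernel_altivec()/disable_kernel_altivec(). */
  void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
                       unsigned long *v2_in)
  {
          /* unchanged vector xor loop (LOAD/XOR/STORE) */
  }

  /* xor_vmx_glue.c - built without -maltivec, so the compiler cannot
   * emit stray vector instructions outside the enable/disable calls. */
  void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
                     unsigned long *v2_in)
  {
          preempt_disable();
          enable_kernel_altivec();
          __xor_altivec_2(bytes, v1_in, v2_in);
          disable_kernel_altivec();
          preempt_enable();
  }
  EXPORT_SYMBOL(xor_altivec_2);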
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -37,7 +37,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
 obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
 
-obj-$(CONFIG_ALTIVEC) += xor_vmx.o
+obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
 CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
 
 obj-$(CONFIG_PPC64) += $(obj64-y)
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -29,10 +29,7 @@
 #define vector __attribute__((vector_size(16)))
 #endif
 
-#include <linux/preempt.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <asm/switch_to.h>
+#include "xor_vmx.h"
 
 typedef vector signed char unative_t;
@@ -64,16 +61,13 @@ typedef vector signed char unative_t;
 		V1##_3 = vec_xor(V1##_3, V2##_3);	\
 	} while (0)
 
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in)
+void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in)
 {
 	DEFINE(v1);
 	DEFINE(v2);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -83,13 +77,9 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		v1 += 4;
 		v2 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_2);
 
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in, unsigned long *v3_in)
+void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in)
 {
 	DEFINE(v1);
@@ -97,9 +87,6 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
 	DEFINE(v3);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -112,13 +99,9 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
 		v2 += 4;
 		v3 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_3);
 
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in, unsigned long *v3_in,
-		   unsigned long *v4_in)
+void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in,
+		     unsigned long *v4_in)
 {
@@ -128,9 +111,6 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
 	DEFINE(v4);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -146,13 +126,9 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
 		v3 += 4;
 		v4 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_4);
 
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in, unsigned long *v3_in,
-		   unsigned long *v4_in, unsigned long *v5_in)
+void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in,
+		     unsigned long *v4_in, unsigned long *v5_in)
 {
@@ -163,9 +139,6 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
 	DEFINE(v5);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -184,8 +157,4 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
 		v4 += 4;
 		v5 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_5);
new file: arch/powerpc/lib/xor_vmx.h

/*
 * Simple interface to link xor_vmx.c and xor_vmx_glue.c
 *
 * Separating these files ensures that no altivec instructions are run
 * outside of the enable/disable altivec block.
 */

void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in);

void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in, unsigned long *v3_in);

void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in, unsigned long *v3_in,
		     unsigned long *v4_in);

void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in, unsigned long *v3_in,
		     unsigned long *v4_in, unsigned long *v5_in);
new file: arch/powerpc/lib/xor_vmx_glue.c

/*
 * Altivec XOR operations
 *
 * Copyright 2017 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/switch_to.h>
#include "xor_vmx.h"

void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_2(bytes, v1_in, v2_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_2);

void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_3(bytes, v1_in, v2_in, v3_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_3);

void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in,
		   unsigned long *v4_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_4);

void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in,
		   unsigned long *v4_in, unsigned long *v5_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_5);
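
For context on how these exported symbols are consumed (this wiring is not
part of the patch): the generic RAID xor code selects an implementation
through struct xor_block_template from include/linux/raid/xor.h, and the
powerpc template points at the glue wrappers, roughly as in
arch/powerpc/include/asm/xor.h:

  /* Sketch of the consumer side; illustrative, not part of this commit. */
  #include <linux/raid/xor.h>	/* struct xor_block_template */

  static struct xor_block_template xor_block_altivec = {
          .name = "altivec",
          .do_2 = xor_altivec_2,	/* the glue wrappers defined above */
          .do_3 = xor_altivec_3,
          .do_4 = xor_altivec_4,
          .do_5 = xor_altivec_5,
  };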