Commit 31d3ec15 authored by Heiko Carstens's avatar Heiko Carstens

s390/fpu: various coding style changes

Address various checkpatch warnings, adjust whitespace, and try to increase
readability. This is just preparation, in order to avoid that subsequent
patches contain any distracting drive-by coding style changes.
Reviewed-by: default avatarClaudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: default avatarHeiko Carstens <hca@linux.ibm.com>
parent b6b842be
...@@ -81,12 +81,12 @@ static inline void sfpc_safe(u32 fpc) ...@@ -81,12 +81,12 @@ static inline void sfpc_safe(u32 fpc)
#define KERNEL_VXR_V16V23 8 #define KERNEL_VXR_V16V23 8
#define KERNEL_VXR_V24V31 16 #define KERNEL_VXR_V24V31 16
#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15) #define KERNEL_VXR_LOW (KERNEL_VXR_V0V7 | KERNEL_VXR_V8V15)
#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23) #define KERNEL_VXR_MID (KERNEL_VXR_V8V15 | KERNEL_VXR_V16V23)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31) #define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23 | KERNEL_VXR_V24V31)
#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH) #define KERNEL_VXR (KERNEL_VXR_LOW | KERNEL_VXR_HIGH)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW) #define KERNEL_FPR (KERNEL_FPC | KERNEL_VXR_LOW)
struct kernel_fpu; struct kernel_fpu;
...@@ -100,26 +100,27 @@ struct kernel_fpu; ...@@ -100,26 +100,27 @@ struct kernel_fpu;
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags); void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags); void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags) static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{ {
preempt_disable(); preempt_disable();
state->mask = S390_lowcore.fpu_flags; state->mask = S390_lowcore.fpu_flags;
if (!test_cpu_flag(CIF_FPU)) if (!test_cpu_flag(CIF_FPU)) {
/* Save user space FPU state and register contents */ /* Save user space FPU state and register contents */
save_fpu_regs(); save_fpu_regs();
else if (state->mask & flags) } else if (state->mask & flags) {
/* Save FPU/vector register in-use by the kernel */ /* Save FPU/vector register in-use by the kernel */
__kernel_fpu_begin(state, flags); __kernel_fpu_begin(state, flags);
}
S390_lowcore.fpu_flags |= flags; S390_lowcore.fpu_flags |= flags;
} }
static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags) static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{ {
S390_lowcore.fpu_flags = state->mask; S390_lowcore.fpu_flags = state->mask;
if (state->mask & flags) if (state->mask & flags) {
/* Restore FPU/vector register in-use by the kernel */ /* Restore FPU/vector register in-use by the kernel */
__kernel_fpu_end(state, flags); __kernel_fpu_end(state, flags);
}
preempt_enable(); preempt_enable();
} }
......
...@@ -20,11 +20,11 @@ static inline bool cpu_has_vx(void) ...@@ -20,11 +20,11 @@ static inline bool cpu_has_vx(void)
static inline void save_vx_regs(__vector128 *vxrs) static inline void save_vx_regs(__vector128 *vxrs)
{ {
asm volatile( asm volatile("\n"
" la 1,%0\n" " la 1,%0\n"
" .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */ " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */ " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
: "=Q" (*(struct vx_array *) vxrs) : : "1"); : "=Q" (*(struct vx_array *)vxrs) : : "1");
} }
static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs) static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
...@@ -50,8 +50,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) ...@@ -50,8 +50,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
if (cpu_has_vx()) if (cpu_has_vx())
convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
else else
memcpy((freg_t *)&fpregs->fprs, fpu->fprs, memcpy((freg_t *)&fpregs->fprs, fpu->fprs, sizeof(fpregs->fprs));
sizeof(fpregs->fprs));
} }
static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
...@@ -60,8 +59,7 @@ static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) ...@@ -60,8 +59,7 @@ static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
if (cpu_has_vx()) if (cpu_has_vx())
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
else else
memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, sizeof(fpregs->fprs));
sizeof(fpregs->fprs));
} }
#endif /* _ASM_S390_FPU_INTERNAL_H */ #endif /* _ASM_S390_FPU_INTERNAL_H */
...@@ -23,7 +23,9 @@ struct fpu { ...@@ -23,7 +23,9 @@ struct fpu {
}; };
/* VX array structure for address operand constraints in inline assemblies */ /* VX array structure for address operand constraints in inline assemblies */
struct vx_array { __vector128 _[__NUM_VXRS]; }; struct vx_array {
__vector128 _[__NUM_VXRS];
};
/* In-kernel FPU state structure */ /* In-kernel FPU state structure */
struct kernel_fpu { struct kernel_fpu {
......
...@@ -16,14 +16,13 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags) ...@@ -16,14 +16,13 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{ {
/* /*
* Limit the save to the FPU/vector registers already * Limit the save to the FPU/vector registers already
* in use by the previous context * in use by the previous context.
*/ */
flags &= state->mask; flags &= state->mask;
if (flags & KERNEL_FPC) {
if (flags & KERNEL_FPC)
/* Save floating point control */ /* Save floating point control */
asm volatile("stfpc %0" : "=Q" (state->fpc)); asm volatile("stfpc %0" : "=Q" (state->fpc));
}
if (!cpu_has_vx()) { if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_LOW) { if (flags & KERNEL_VXR_LOW) {
/* Save floating-point registers */ /* Save floating-point registers */
...@@ -46,7 +45,6 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags) ...@@ -46,7 +45,6 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
} }
return; return;
} }
/* Test and save vector registers */ /* Test and save vector registers */
asm volatile ( asm volatile (
/* /*
...@@ -97,15 +95,14 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags) ...@@ -97,15 +95,14 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{ {
/* /*
* Limit the restore to the FPU/vector registers of the * Limit the restore to the FPU/vector registers of the
* previous context that have been overwritte by the * previous context that have been overwritten by the
* current context * current context.
*/ */
flags &= state->mask; flags &= state->mask;
if (flags & KERNEL_FPC) {
if (flags & KERNEL_FPC)
/* Restore floating-point controls */ /* Restore floating-point controls */
asm volatile("lfpc %0" : : "Q" (state->fpc)); asm volatile("lfpc %0" : : "Q" (state->fpc));
}
if (!cpu_has_vx()) { if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_LOW) { if (flags & KERNEL_VXR_LOW) {
/* Restore floating-point registers */ /* Restore floating-point registers */
...@@ -128,7 +125,6 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags) ...@@ -128,7 +125,6 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
} }
return; return;
} }
/* Test and restore (load) vector registers */ /* Test and restore (load) vector registers */
asm volatile ( asm volatile (
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment