Commit 400e4b20 authored by Ingo Molnar

x86/fpu: Rename xsave.header::xstate_bv to 'xfeatures'

'xsave.header::xstate_bv' is a misnomer - what does 'bv' stand for?

It probably comes from the 'XGETBV' instruction name, but I could not
find the Intel documentation explaining where that abbreviation comes
from. It presumably means 'bit vector' - or perhaps something else?

But instead of guessing at an obscure name, how about naming the field
in an obvious, descriptive way that tells us exactly what it does?

So rename it to 'xfeatures': a bitmask of the xfeatures that are
active (fpstate_active) in that FPU context structure.

An eyesore like:

           fpu->state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;

is now much more readable:

           fpu->state->xsave.header.xfeatures |= XSTATE_FP;

The new form is not only far more readable, it is also shorter.
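
For reference, header.xfeatures is a plain feature bitmask: bit 0 marks
x87 FP state as non-init, bit 1 SSE state, bit 2 AVX/YMM state, and so
on. A minimal stand-alone sketch of how such a mask is typically tested
and updated (the XFEATURE_MASK_* defines below are illustrative
stand-ins for the kernel's XSTATE_* constants, not the kernel headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's XSTATE_* feature bits. */
#define XFEATURE_MASK_FP   (1ULL << 0)  /* x87 floating point state */
#define XFEATURE_MASK_SSE  (1ULL << 1)  /* SSE/XMM state            */
#define XFEATURE_MASK_YMM  (1ULL << 2)  /* AVX upper-half (YMM)     */

int main(void)
{
        uint64_t xfeatures = 0;

        /* Mark FP and SSE as non-init, the way fpregs_set()/xfpregs_set() do. */
        xfeatures |= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;

        /* Test whether a given component is still in its init state. */
        if (!(xfeatures & XFEATURE_MASK_YMM))
                printf("YMM state is in its init state\n");

        printf("active xfeatures: %#llx\n", (unsigned long long)xfeatures);
        return 0;
}
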
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3a54450b
@@ -261,7 +261,7 @@ static inline int fpu_save_init(struct fpu *fpu)
                 /*
                  * xsave header may indicate the init state of the FP.
                  */
-                if (!(fpu->state->xsave.header.xstate_bv & XSTATE_FP))
+                if (!(fpu->state->xsave.header.xfeatures & XSTATE_FP))
                         return 1;
         } else if (use_fxsr()) {
                 fpu_fxsave(fpu);
...
@@ -100,7 +100,7 @@ struct bndcsr {
 } __packed;

 struct xstate_header {
-        u64                     xstate_bv;
+        u64                     xfeatures;
         u64                     xcomp_bv;
         u64                     reserved[6];
 } __attribute__((packed));
...
@@ -15,7 +15,7 @@ struct user_ymmh_regs {
 };

 struct user_xstate_header {
-        __u64                           xstate_bv;
+        __u64                           xfeatures;
         __u64                           reserved1[2];
         __u64                           reserved2[5];
 };
@@ -41,11 +41,11 @@ struct user_xstate_header {
  * particular process/thread.
  *
  * Also when the user modifies certain state FP/SSE/etc through the
- * ptrace interface, they must ensure that the header.xstate_bv
+ * ptrace interface, they must ensure that the header.xfeatures
  * bytes[512..519] of the memory layout are updated correspondingly.
  * i.e., for example when FP state is modified to a non-init state,
- * header.xstate_bv's bit 0 must be set to '1', when SSE is modified to
- * non-init state, header.xstate_bv's bit 1 must to be set to '1', etc.
+ * header.xfeatures's bit 0 must be set to '1', when SSE is modified to
+ * non-init state, header.xfeatures's bit 1 must to be set to '1', etc.
  */
 #define USER_XSTATE_FX_SW_WORDS 6
 #define USER_XSTATE_XCR0_WORD   0
...
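
The comment in the hunk above spells out the contract for anyone editing
an XSAVE buffer directly (ptrace, signal frames): when a component's
registers are written to a non-init value, the matching bit in
header.xfeatures (bytes 512..519 of the buffer) must be set as well,
otherwise the state is treated as init. Below is a rough user-space
sketch of that rule; the struct is a simplified stand-in for the real
XSAVE layout, shown only for illustration:

#include <stdint.h>
#include <string.h>

/* Simplified stand-in for an XSAVE area: 512-byte legacy region + header. */
struct fake_xsave_area {
        uint8_t  legacy[512];   /* fxsave image: x87/SSE register state */
        uint64_t xfeatures;     /* header, bytes 512..519 of the buffer */
        uint64_t reserved[7];
};

/* Write a new x87 control word and record that FP state is now non-init. */
static void set_fp_cwd(struct fake_xsave_area *buf, uint16_t cwd)
{
        memcpy(&buf->legacy[0], &cwd, sizeof(cwd)); /* cwd is at offset 0 */
        buf->xfeatures |= 1ULL << 0;                /* bit 0: FP in use   */
}

int main(void)
{
        struct fake_xsave_area buf = { 0 };

        set_fp_cwd(&buf, 0x37f);        /* 0x37f is the default x87 control word */
        return buf.xfeatures == 1 ? 0 : 1;
}
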
@@ -25,7 +25,7 @@ struct _fpx_sw_bytes {
         __u32 extended_size;    /* total size of the layout referred by
                                  * fpstate pointer in the sigcontext.
                                  */
-        __u64 xstate_bv;
+        __u64 xfeatures;
                                 /* feature bit mask (including fp/sse/extended
                                  * state) that is present in the memory
                                  * layout.
@@ -210,7 +210,7 @@ struct sigcontext {
 #endif /* !__i386__ */

         struct _header {
-                __u64 xstate_bv;
+                __u64 xfeatures;
                 __u64 reserved1[2];
                 __u64 reserved2[5];
         };
...
@@ -470,7 +470,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
          * presence of FP and SSE state.
          */
         if (cpu_has_xsave)
-                fpu->state->xsave.header.xstate_bv |= XSTATE_FPSSE;
+                fpu->state->xsave.header.xfeatures |= XSTATE_FPSSE;

         return ret;
 }
@@ -528,7 +528,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
          * mxcsr reserved bits must be masked to zero for security reasons.
          */
         xsave->i387.mxcsr &= mxcsr_feature_mask;
-        xsave->header.xstate_bv &= xfeatures_mask;
+        xsave->header.xfeatures &= xfeatures_mask;
         /*
          * These bits must be zero.
          */
@@ -740,7 +740,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
          * presence of FP.
          */
         if (cpu_has_xsave)
-                fpu->state->xsave.header.xstate_bv |= XSTATE_FP;
+                fpu->state->xsave.header.xfeatures |= XSTATE_FP;

         return ret;
 }
...
@@ -32,7 +32,7 @@ static unsigned int xfeatures_nr;
 /*
  * If a processor implementation discern that a processor state component is
  * in its initialized state it may modify the corresponding bit in the
- * header.xstate_bv as '0', with out modifying the corresponding memory
+ * header.xfeatures as '0', with out modifying the corresponding memory
  * layout in the case of xsaveopt. While presenting the xstate information to
  * the user, we always ensure that the memory layout of a feature will be in
  * the init state if the corresponding header bit is zero. This is to ensure
@@ -43,24 +43,24 @@ void __sanitize_i387_state(struct task_struct *tsk)
 {
         struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
         int feature_bit = 0x2;
-        u64 xstate_bv;
+        u64 xfeatures;

         if (!fx)
                 return;

-        xstate_bv = tsk->thread.fpu.state->xsave.header.xstate_bv;
+        xfeatures = tsk->thread.fpu.state->xsave.header.xfeatures;

         /*
          * None of the feature bits are in init state. So nothing else
          * to do for us, as the memory layout is up to date.
          */
-        if ((xstate_bv & xfeatures_mask) == xfeatures_mask)
+        if ((xfeatures & xfeatures_mask) == xfeatures_mask)
                 return;

         /*
          * FP is in init state
          */
-        if (!(xstate_bv & XSTATE_FP)) {
+        if (!(xfeatures & XSTATE_FP)) {
                 fx->cwd = 0x37f;
                 fx->swd = 0;
                 fx->twd = 0;
@@ -73,17 +73,17 @@ void __sanitize_i387_state(struct task_struct *tsk)
         /*
          * SSE is in init state
          */
-        if (!(xstate_bv & XSTATE_SSE))
+        if (!(xfeatures & XSTATE_SSE))
                 memset(&fx->xmm_space[0], 0, 256);

-        xstate_bv = (xfeatures_mask & ~xstate_bv) >> 2;
+        xfeatures = (xfeatures_mask & ~xfeatures) >> 2;

         /*
          * Update all the other memory layouts for which the corresponding
          * header bit is in the init state.
          */
-        while (xstate_bv) {
-                if (xstate_bv & 0x1) {
+        while (xfeatures) {
+                if (xfeatures & 0x1) {
                         int offset = xstate_offsets[feature_bit];
                         int size = xstate_sizes[feature_bit];
@@ -92,7 +92,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
                                size);
                 }

-                xstate_bv >>= 1;
+                xfeatures >>= 1;
                 feature_bit++;
         }
 }
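
The __sanitize_i387_state() hunks above are the usual "walk the feature
bitmask" pattern: mask off the two legacy bits (FP, SSE), then shift
through the remaining bits and reset every component whose header bit
says it is still in its init state. A stripped-down sketch of that
pattern follows; the offsets/sizes are illustrative (in the kernel they
come from xstate_offsets[]/xstate_sizes[]), and where the kernel copies
the component's init image from init_xstate_buf, the sketch simply
zeroes the region:

#include <stdint.h>
#include <string.h>

#define NR_XFEATURES 8

/* Illustrative per-component offsets/sizes; the kernel derives these from CPUID. */
static const int feat_offset[NR_XFEATURES] = { 0, 0, 576, 960, 0, 0, 0, 0 };
static const int feat_size[NR_XFEATURES]   = { 0, 0, 256,  64, 0, 0, 0, 0 };

/* Reset every extended component whose bit is clear in 'xfeatures'. */
static void sanitize(uint8_t *buf, uint64_t xfeatures, uint64_t enabled_mask)
{
        int feature_bit = 2;    /* skip the legacy FP (bit 0) and SSE (bit 1) */
        uint64_t missing = (enabled_mask & ~xfeatures) >> 2;

        while (missing) {
                if (missing & 0x1)
                        memset(buf + feat_offset[feature_bit], 0,
                               feat_size[feature_bit]);
                missing >>= 1;
                feature_bit++;
        }
}

int main(void)
{
        static uint8_t buf[1088];
        uint64_t enabled = 0xf; /* FP | SSE | YMM | BNDREGS enabled on this CPU */
        uint64_t saved   = 0x3; /* only FP and SSE were saved as non-init       */

        memset(buf, 0xff, sizeof(buf)); /* pretend the buffer holds stale data */
        sanitize(buf, saved, enabled);  /* YMM and BNDREGS regions are zeroed  */
        return 0;
}
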
@@ -162,7 +162,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
 {
         struct xsave_struct __user *x = buf;
         struct _fpx_sw_bytes *sw_bytes;
-        u32 xstate_bv;
+        u32 xfeatures;
         int err;

         /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
@@ -175,25 +175,25 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
         err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

         /*
-         * Read the xstate_bv which we copied (directly from the cpu or
+         * Read the xfeatures which we copied (directly from the cpu or
          * from the state in task struct) to the user buffers.
          */
-        err |= __get_user(xstate_bv, (__u32 *)&x->header.xstate_bv);
+        err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

         /*
          * For legacy compatible, we always set FP/SSE bits in the bit
          * vector while saving the state to the user context. This will
          * enable us capturing any changes(during sigreturn) to
          * the FP/SSE bits by the legacy applications which don't touch
-         * xstate_bv in the xsave header.
+         * xfeatures in the xsave header.
          *
-         * xsave aware apps can change the xstate_bv in the xsave
+         * xsave aware apps can change the xfeatures in the xsave
          * header as well as change any contents in the memory layout.
          * xrestore as part of sigreturn will capture all the changes.
          */
-        xstate_bv |= XSTATE_FPSSE;
+        xfeatures |= XSTATE_FPSSE;

-        err |= __put_user(xstate_bv, (__u32 *)&x->header.xstate_bv);
+        err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

         return err;
 }
@@ -277,7 +277,7 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 static inline void
 sanitize_restored_xstate(struct task_struct *tsk,
                          struct user_i387_ia32_struct *ia32_env,
-                         u64 xstate_bv, int fx_only)
+                         u64 xfeatures, int fx_only)
 {
         struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
         struct xstate_header *header = &xsave->header;
@@ -291,9 +291,9 @@ sanitize_restored_xstate(struct task_struct *tsk,
                  * layout and not enabled by the OS.
                  */
                 if (fx_only)
-                        header->xstate_bv = XSTATE_FPSSE;
+                        header->xfeatures = XSTATE_FPSSE;
                 else
-                        header->xstate_bv &= (xfeatures_mask & xstate_bv);
+                        header->xfeatures &= (xfeatures_mask & xfeatures);
         }

         if (use_fxsr()) {
@@ -335,7 +335,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
         struct task_struct *tsk = current;
         struct fpu *fpu = &tsk->thread.fpu;
         int state_size = xstate_size;
-        u64 xstate_bv = 0;
+        u64 xfeatures = 0;
         int fx_only = 0;

         ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
@@ -369,7 +369,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                         fx_only = 1;
                 } else {
                         state_size = fx_sw_user.xstate_size;
-                        xstate_bv = fx_sw_user.xstate_bv;
+                        xfeatures = fx_sw_user.xfeatures;
                 }
         }
@@ -398,7 +398,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                         fpstate_init(fpu);
                         err = -1;
                 } else {
-                        sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
+                        sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
                 }

                 fpu->fpstate_active = 1;
@@ -415,7 +415,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
          * state to the registers directly (with exceptions handled).
          */
         user_fpu_begin();
-        if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
+        if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
                 fpu_reset_state(fpu);
                 return -1;
         }
@@ -441,7 +441,7 @@ static void prepare_fx_sw_frame(void)
         fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
         fx_sw_reserved.extended_size = size;
-        fx_sw_reserved.xstate_bv = xfeatures_mask;
+        fx_sw_reserved.xfeatures = xfeatures_mask;
         fx_sw_reserved.xstate_size = xstate_size;

         if (config_enabled(CONFIG_IA32_EMULATION)) {
@@ -576,7 +576,7 @@ static void __init setup_init_fpu_buf(void)
         if (cpu_has_xsaves) {
                 init_xstate_buf->header.xcomp_bv =
                         (u64)1 << 63 | xfeatures_mask;
-                init_xstate_buf->header.xstate_bv = xfeatures_mask;
+                init_xstate_buf->header.xfeatures = xfeatures_mask;
         }

         /*
...
@@ -3197,7 +3197,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 {
         struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
-        u64 xstate_bv = xsave->header.xstate_bv;
+        u64 xstate_bv = xsave->header.xfeatures;
         u64 valid;

         /*
@@ -3243,7 +3243,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
         memcpy(xsave, src, XSAVE_HDR_OFFSET);

         /* Set XSTATE_BV and possibly XCOMP_BV. */
-        xsave->header.xstate_bv = xstate_bv;
+        xsave->header.xfeatures = xstate_bv;
         if (cpu_has_xsaves)
                 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
...