Commit 56583c9a authored by Ingo Molnar

x86/fpu: Clarify parameter names in the copy_xstate_to_*() methods

Right now there's a confusing mixture of 'offset' and 'size' parameters:

 - __copy_xstate_to_*() input parameter 'end_pos' is not really an offset,
   but the full size of the copy to be performed.

 - input parameter 'count' to copy_xstate_to_*() shadows that of
   __copy_xstate_to_*()'s 'count' parameter name - but the roles
   are different: the first one is the total number of bytes to
   be copied, while the second one is a partial copy size.

To unconfuse all this, use a consistent set of parameter names:

 - 'size' is the partial copy size within a single xstate component
 - 'size_total' is the total copy requested
 - 'offset_start' is the requested starting offset.
 - 'offset' is the offset within an xstate component.

No change in functionality.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20170923130016.21448-9-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8a5b7318
...@@ -48,8 +48,8 @@ void fpu__xstate_clear_all_cpu_caps(void); ...@@ -48,8 +48,8 @@ void fpu__xstate_clear_all_cpu_caps(void);
void *get_xsave_addr(struct xregs_state *xsave, int xstate); void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field); const void *get_xsave_field_ptr(int xstate_field);
int using_compacted_format(void); int using_compacted_format(void);
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int pos, unsigned int count); int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int pos, unsigned int count); int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_user_to_xstate(const void *kbuf, const void __user *ubuf, int copy_user_to_xstate(const void *kbuf, const void __user *ubuf,
struct xregs_state *xsave); struct xregs_state *xsave);
#endif #endif
...@@ -927,15 +927,15 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, ...@@ -927,15 +927,15 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
static inline int static inline int
__copy_xstate_to_kernel(void *kbuf, __copy_xstate_to_kernel(void *kbuf,
const void *data, const void *data,
unsigned int pos, unsigned int count, int end_pos) unsigned int offset, unsigned int size, int size_total)
{ {
if (!count) if (!size)
return 0; return 0;
if (end_pos < 0 || pos < end_pos) { if (size_total < 0 || offset < size_total) {
unsigned int copy = end_pos < 0 ? count : min(count, end_pos - pos); unsigned int copy = size_total < 0 ? size : min(size, size_total - offset);
memcpy(kbuf + pos, data, copy); memcpy(kbuf + offset, data, copy);
} }
return 0; return 0;
} }
...@@ -947,7 +947,7 @@ __copy_xstate_to_kernel(void *kbuf, ...@@ -947,7 +947,7 @@ __copy_xstate_to_kernel(void *kbuf,
* It supports partial copy but pos always starts from zero. This is called * It supports partial copy but pos always starts from zero. This is called
* from xstateregs_get() and there we check the CPU has XSAVES. * from xstateregs_get() and there we check the CPU has XSAVES.
*/ */
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int pos, unsigned int count) int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{ {
unsigned int offset, size; unsigned int offset, size;
int ret, i; int ret, i;
...@@ -956,7 +956,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po ...@@ -956,7 +956,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po
/* /*
* Currently copy_regset_to_user() starts from pos 0: * Currently copy_regset_to_user() starts from pos 0:
*/ */
if (unlikely(pos != 0)) if (unlikely(offset_start != 0))
return -EFAULT; return -EFAULT;
/* /*
...@@ -972,7 +972,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po ...@@ -972,7 +972,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po
offset = offsetof(struct xregs_state, header); offset = offsetof(struct xregs_state, header);
size = sizeof(header); size = sizeof(header);
ret = __copy_xstate_to_kernel(kbuf, &header, offset, size, count); ret = __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
if (ret) if (ret)
return ret; return ret;
...@@ -986,11 +986,11 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po ...@@ -986,11 +986,11 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po
offset = xstate_offsets[i]; offset = xstate_offsets[i];
size = xstate_sizes[i]; size = xstate_sizes[i];
ret = __copy_xstate_to_kernel(kbuf, src, offset, size, count); ret = __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
if (ret) if (ret)
return ret; return ret;
if (offset + size >= count) if (offset + size >= size_total)
break; break;
} }
...@@ -1002,7 +1002,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po ...@@ -1002,7 +1002,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po
offset = offsetof(struct fxregs_state, sw_reserved); offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes); size = sizeof(xstate_fx_sw_bytes);
ret = __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, count); ret = __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
if (ret) if (ret)
return ret; return ret;
...@@ -1010,15 +1010,15 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po ...@@ -1010,15 +1010,15 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po
} }
static inline int static inline int
__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, unsigned int count, int end_pos) __copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, int size_total)
{ {
if (!count) if (!size)
return 0; return 0;
if (end_pos < 0 || pos < end_pos) { if (size_total < 0 || offset < size_total) {
unsigned int copy = end_pos < 0 ? count : min(count, end_pos - pos); unsigned int copy = size_total < 0 ? size : min(size, size_total - offset);
if (__copy_to_user(ubuf + pos, data, copy)) if (__copy_to_user(ubuf + offset, data, copy))
return -EFAULT; return -EFAULT;
} }
return 0; return 0;
...@@ -1030,7 +1030,7 @@ __copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, uns ...@@ -1030,7 +1030,7 @@ __copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, uns
* zero. This is called from xstateregs_get() and there we check the CPU * zero. This is called from xstateregs_get() and there we check the CPU
* has XSAVES. * has XSAVES.
*/ */
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int pos, unsigned int count) int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{ {
unsigned int offset, size; unsigned int offset, size;
int ret, i; int ret, i;
...@@ -1039,7 +1039,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i ...@@ -1039,7 +1039,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i
/* /*
* Currently copy_regset_to_user() starts from pos 0: * Currently copy_regset_to_user() starts from pos 0:
*/ */
if (unlikely(pos != 0)) if (unlikely(offset_start != 0))
return -EFAULT; return -EFAULT;
/* /*
...@@ -1055,7 +1055,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i ...@@ -1055,7 +1055,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i
offset = offsetof(struct xregs_state, header); offset = offsetof(struct xregs_state, header);
size = sizeof(header); size = sizeof(header);
ret = __copy_xstate_to_user(ubuf, &header, offset, size, count); ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
if (ret) if (ret)
return ret; return ret;
...@@ -1069,11 +1069,11 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i ...@@ -1069,11 +1069,11 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i
offset = xstate_offsets[i]; offset = xstate_offsets[i];
size = xstate_sizes[i]; size = xstate_sizes[i];
ret = __copy_xstate_to_user(ubuf, src, offset, size, count); ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
if (ret) if (ret)
return ret; return ret;
if (offset + size >= count) if (offset + size >= size_total)
break; break;
} }
...@@ -1085,7 +1085,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i ...@@ -1085,7 +1085,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i
offset = offsetof(struct fxregs_state, sw_reserved); offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes); size = sizeof(xstate_fx_sw_bytes);
ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, count); ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
if (ret) if (ret)
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment