Commit 4d981cf2 authored by Ingo Molnar

x86/fpu: Remove 'ubuf' parameter from the copy_xstate_to_kernel() APIs

The 'ubuf' parameter is unused in the _kernel() side of the API, remove it.

This simplifies the code and makes it easier to think about.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20170923130016.21448-4-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f0d4f30a
...@@ -48,7 +48,7 @@ void fpu__xstate_clear_all_cpu_caps(void); ...@@ -48,7 +48,7 @@ void fpu__xstate_clear_all_cpu_caps(void);
void *get_xsave_addr(struct xregs_state *xsave, int xstate); void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field); const void *get_xsave_field_ptr(int xstate_field);
int using_compacted_format(void); int using_compacted_format(void);
int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf, struct xregs_state *xsave); int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, struct xregs_state *xsave);
int copy_xstate_to_user(unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf, struct xregs_state *xsave); int copy_xstate_to_user(unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf, struct xregs_state *xsave);
int copy_user_to_xstate(const void *kbuf, const void __user *ubuf, int copy_user_to_xstate(const void *kbuf, const void __user *ubuf,
struct xregs_state *xsave); struct xregs_state *xsave);
......
...@@ -93,7 +93,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, ...@@ -93,7 +93,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
if (using_compacted_format()) { if (using_compacted_format()) {
if (kbuf) if (kbuf)
ret = copy_xstate_to_kernel(pos, count, kbuf, ubuf, xsave); ret = copy_xstate_to_kernel(pos, count, kbuf, xsave);
else else
ret = copy_xstate_to_user(pos, count, kbuf, ubuf, xsave); ret = copy_xstate_to_user(pos, count, kbuf, ubuf, xsave);
} else { } else {
......
...@@ -926,7 +926,7 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, ...@@ -926,7 +926,7 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
*/ */
static inline int static inline int
__copy_xstate_to_kernel(unsigned int pos, unsigned int count, __copy_xstate_to_kernel(unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf, void *kbuf,
const void *data, const int start_pos, const void *data, const int start_pos,
const int end_pos) const int end_pos)
{ {
...@@ -936,12 +936,7 @@ __copy_xstate_to_kernel(unsigned int pos, unsigned int count, ...@@ -936,12 +936,7 @@ __copy_xstate_to_kernel(unsigned int pos, unsigned int count,
if (end_pos < 0 || pos < end_pos) { if (end_pos < 0 || pos < end_pos) {
unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos)); unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
if (kbuf) { memcpy(kbuf + pos, data, copy);
memcpy(kbuf + pos, data, copy);
} else {
if (__copy_to_user(ubuf + pos, data, copy))
return -EFAULT;
}
} }
return 0; return 0;
} }
...@@ -953,8 +948,7 @@ __copy_xstate_to_kernel(unsigned int pos, unsigned int count, ...@@ -953,8 +948,7 @@ __copy_xstate_to_kernel(unsigned int pos, unsigned int count,
* It supports partial copy but pos always starts from zero. This is called * It supports partial copy but pos always starts from zero. This is called
* from xstateregs_get() and there we check the CPU has XSAVES. * from xstateregs_get() and there we check the CPU has XSAVES.
*/ */
int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, struct xregs_state *xsave)
void __user *ubuf, struct xregs_state *xsave)
{ {
unsigned int offset, size; unsigned int offset, size;
int ret, i; int ret, i;
...@@ -979,8 +973,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, ...@@ -979,8 +973,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf,
offset = offsetof(struct xregs_state, header); offset = offsetof(struct xregs_state, header);
size = sizeof(header); size = sizeof(header);
ret = __copy_xstate_to_kernel(offset, size, kbuf, ubuf, &header, 0, count); ret = __copy_xstate_to_kernel(offset, size, kbuf, &header, 0, count);
if (ret) if (ret)
return ret; return ret;
...@@ -994,8 +987,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, ...@@ -994,8 +987,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf,
offset = xstate_offsets[i]; offset = xstate_offsets[i];
size = xstate_sizes[i]; size = xstate_sizes[i];
ret = __copy_xstate_to_kernel(offset, size, kbuf, ubuf, src, 0, count); ret = __copy_xstate_to_kernel(offset, size, kbuf, src, 0, count);
if (ret) if (ret)
return ret; return ret;
...@@ -1011,8 +1003,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, ...@@ -1011,8 +1003,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf,
offset = offsetof(struct fxregs_state, sw_reserved); offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes); size = sizeof(xstate_fx_sw_bytes);
ret = __copy_xstate_to_kernel(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count); ret = __copy_xstate_to_kernel(offset, size, kbuf, xstate_fx_sw_bytes, 0, count);
if (ret) if (ret)
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment