Commit c8e3dd86 authored by Al Viro

x86 user stack frame reads: switch to explicit __get_user()

rather than relying upon the magic in raw_copy_from_user()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent bb6d3fb3
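
Before the diff, a minimal userspace sketch (not kernel code, and not part of this commit) of the pattern being adopted: instead of copying a hard-coded number of bytes from a hand-computed offset, each field is read through a typed struct pointer, so the compiler derives the access width and the offset from the type. The read_field() macro below is an illustrative stand-in for __get_user() and, unlike the real thing, never faults.

/* Userspace sketch of the typed-read pattern this commit adopts. */
#include <stdio.h>
#include <string.h>

struct stack_frame_ia32 {
	unsigned int next_frame;
	unsigned int return_address;
};

/* Stand-in for __get_user(): the access width comes from the type of
 * *src, so the caller never hard-codes "4" the way the old code did. */
#define read_field(dst, src) \
	(memcpy(&(dst), (src), sizeof(*(src))), 0 /* 0 == success */)

int main(void)
{
	struct stack_frame_ia32 user = {
		.next_frame     = 0xbffff010u,
		.return_address = 0x08048abcu,
	};
	const struct stack_frame_ia32 *fp = &user;
	struct stack_frame_ia32 frame;

	/* New style: one typed read per field, each checked for failure. */
	if (read_field(frame.next_frame, &fp->next_frame))
		return 1;
	if (read_field(frame.return_address, &fp->return_address))
		return 1;

	printf("next_frame=%#x return_address=%#x\n",
	       frame.next_frame, frame.return_address);
	return 0;
}
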
arch/x86/events/core.c
@@ -2490,7 +2490,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 	/* 32-bit process in 64-bit kernel. */
 	unsigned long ss_base, cs_base;
 	struct stack_frame_ia32 frame;
-	const void __user *fp;
+	const struct stack_frame_ia32 __user *fp;
 
 	if (!test_thread_flag(TIF_IA32))
 		return 0;
@@ -2501,18 +2501,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 	fp = compat_ptr(ss_base + regs->bp);
 	pagefault_disable();
 	while (entry->nr < entry->max_stack) {
-		unsigned long bytes;
-		frame.next_frame     = 0;
-		frame.return_address = 0;
-
 		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
-		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
-		if (bytes != 0)
+		if (__get_user(frame.next_frame, &fp->next_frame))
 			break;
-		bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
-		if (bytes != 0)
+		if (__get_user(frame.return_address, &fp->return_address))
 			break;
 
 		perf_callchain_store(entry, cs_base + frame.return_address);
@@ -2533,7 +2527,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
-	const unsigned long __user *fp;
+	const struct stack_frame __user *fp;
 
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
@@ -2546,7 +2540,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
 		return;
 
-	fp = (unsigned long __user *)regs->bp;
+	fp = (void __user *)regs->bp;
 
 	perf_callchain_store(entry, regs->ip);
 
@@ -2558,19 +2552,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
 	pagefault_disable();
 	while (entry->nr < entry->max_stack) {
-		unsigned long bytes;
-
-		frame.next_frame     = NULL;
-		frame.return_address = 0;
-
 		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
-		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
-		if (bytes != 0)
+		if (__get_user(frame.next_frame, &fp->next_frame))
 			break;
-		bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
-		if (bytes != 0)
+		if (__get_user(frame.return_address, &fp->return_address))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
arch/x86/include/asm/uaccess.h
@@ -694,15 +694,6 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
-/*
- * We rely on the nested NMI work to allow atomic faults from the NMI path; the
- * nested NMI paths are careful to preserve CR2.
- *
- * Caller must use pagefault_enable/disable, or run in interrupt context,
- * and also do a uaccess_ok() check
- */
-#define __copy_from_user_nmi __copy_from_user_inatomic
-
 /*
  * The "unsafe" user accesses aren't really "unsafe", but the naming
  * is a big fat warning: you have to not only do the access_ok()
arch/x86/kernel/stacktrace.c
@@ -96,7 +96,8 @@ struct stack_frame_user {
 };
 
 static int
-copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
+copy_stack_frame(const struct stack_frame_user __user *fp,
+		 struct stack_frame_user *frame)
 {
 	int ret;
 
@@ -105,7 +106,8 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 
 	ret = 1;
 	pagefault_disable();
-	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+	if (__get_user(frame->next_fp, &fp->next_fp) ||
+	    __get_user(frame->ret_addr, &fp->ret_addr))
 		ret = 0;
 	pagefault_enable();
 
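
A second userspace sketch, again with illustrative stand-ins (the fake frames, FAULT_BELOW, and get_field() are assumptions, not kernel interfaces): it mirrors the converted copy_stack_frame() shape, where either field read failing rejects the whole frame and ends the walk, just as a faulting __get_user() would under pagefault_disable().

/* Sketch of the converted copy_stack_frame() shape: both typed field
 * reads must succeed, or the frame is rejected and the walk stops. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct stack_frame_user {
	const struct stack_frame_user *next_fp;
	unsigned long ret_addr;
};

#define FAULT_BELOW 0x1000UL	/* pretend the first page is unmapped */

/* Stand-in for __get_user(): nonzero means the access "faulted". */
#define get_field(dst, src) \
	(((uintptr_t)(src) < FAULT_BELOW) ? -1 \
	 : (memcpy(&(dst), (src), sizeof(dst)), 0))

static int copy_stack_frame(const struct stack_frame_user *fp,
			    struct stack_frame_user *frame)
{
	int ret = 1;

	/* Either read failing invalidates the whole frame. */
	if (get_field(frame->next_fp, &fp->next_fp) ||
	    get_field(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	return ret;
}

int main(void)
{
	/* Two good frames; the second links into "unmapped" memory. */
	struct stack_frame_user f2 = {
		(const struct stack_frame_user *)16, 0x4005f0
	};
	struct stack_frame_user f1 = { &f2, 0x400720 };
	const struct stack_frame_user *fp = &f1;
	struct stack_frame_user frame;

	while (copy_stack_frame(fp, &frame)) {	/* stops at the bad frame */
		printf("ret_addr=%#lx\n", frame.ret_addr);
		fp = frame.next_fp;
	}
	return 0;
}
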