Commit 8a4d0b56 authored by Ingo Molnar


Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core

Pull uprobes updates from Oleg Nesterov:

 " [...] this way the upcoming ARM port doesn't (almost) need
   changes outside of arch/arm and thus it would be simpler to
   route everything via the ARM trees. "
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents f891d8cf f72d41fa
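
The quoted rationale is reflected in the include/linux/uprobes.h hunk below: the arch_uprobe_* hook prototypes move out of each architecture's asm/uprobes.h into the generic header, so a new port such as ARM mostly has to provide its arch_uprobe layout and implement the hooks under its own arch/ directory. As a rough sketch of what that leaves in an arch header (every name, size and opcode below is invented for illustration, not taken from any real port):

/* arch/<newarch>/include/asm/uprobes.h -- illustrative sketch only */
#ifndef _ASM_NEWARCH_UPROBES_H
#define _ASM_NEWARCH_UPROBES_H

#include <linux/types.h>

typedef u32 uprobe_opcode_t;			/* width of one probed opcode */

#define MAX_UINSN_BYTES		4
#define UPROBE_SWBP_INSN	0x0000dead	/* fictitious breakpoint encoding */
#define UPROBE_SWBP_INSN_SIZE	4

struct arch_uprobe {
	union {
		u8	insn[MAX_UINSN_BYTES];	/* original instruction */
		u8	ixol[MAX_UINSN_BYTES];	/* copy stepped out of line */
	};
};

struct arch_uprobe_task {
	unsigned long	saved_trap_nr;
};

/*
 * No extern declarations needed here any more: the arch_uprobe_*()
 * prototypes are picked up from include/linux/uprobes.h after this merge.
 */
#endif /* _ASM_NEWARCH_UPROBES_H */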
arch/powerpc/include/asm/uprobes.h

@@ -37,6 +37,7 @@ typedef ppc_opcode_t uprobe_opcode_t;
 struct arch_uprobe {
 	union {
 		u8	insn[MAX_UINSN_BYTES];
+		u8	ixol[MAX_UINSN_BYTES];
 		u32	ainsn;
 	};
 };
@@ -45,11 +46,4 @@ struct arch_uprobe_task {
 	unsigned long	saved_trap_nr;
 };
 
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
-extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
-extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
 #endif	/* _ASM_UPROBES_H */
arch/x86/include/asm/uprobes.h

@@ -35,7 +35,10 @@ typedef u8 uprobe_opcode_t;
 
 struct arch_uprobe {
 	u16		fixups;
-	u8		insn[MAX_UINSN_BYTES];
+	union {
+		u8	insn[MAX_UINSN_BYTES];
+		u8	ixol[MAX_UINSN_BYTES];
+	};
 #ifdef CONFIG_X86_64
 	unsigned long	rip_rela_target_address;
 #endif
@@ -49,11 +52,4 @@ struct arch_uprobe_task {
 	unsigned int	saved_tf;
 };
 
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
-extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
-extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
 #endif	/* _ASM_UPROBES_H */
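
On x86 and powerpc, insn and ixol alias the same bytes, so nothing changes functionally; the point of the new name, presumably, is that an arch may need the instruction it single-steps out of line to differ from the original (for example because a PC-relative operand has to be fixed up first), and the generic code now always copies arch.ixol into the XOL slot (see the xol_get_insn_slot() hunk further down). A hedged sketch of how a port that keeps the two as separate fields (not a union) might fill them in during analysis; the arch_newarch_*() helpers are purely hypothetical:

#include <linux/uprobes.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Illustrative only -- not from any in-tree port. */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
			     struct mm_struct *mm, unsigned long addr)
{
	u32 insn;

	memcpy(&insn, auprobe->insn, sizeof(insn));	/* original instruction */

	if (!arch_newarch_insn_is_probeable(insn))	/* hypothetical check */
		return -EINVAL;

	/*
	 * Store the instruction that will actually be single-stepped.
	 * A real port might rewrite PC-relative operands here; in this
	 * sketch the fixup helper is assumed to return the adjusted copy.
	 */
	insn = arch_newarch_fixup_insn(insn, addr);	/* hypothetical fixup */
	memcpy(auprobe->ixol, &insn, sizeof(insn));
	return 0;
}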
include/linux/uprobes.h

@@ -30,6 +30,7 @@
 struct vm_area_struct;
 struct mm_struct;
 struct inode;
+struct notifier_block;
 
 #ifdef CONFIG_ARCH_SUPPORTS_UPROBES
 # include <asm/uprobes.h>
@@ -108,6 +109,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
 extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
@@ -125,6 +127,13 @@ extern void uprobe_notify_resume(struct pt_regs *regs);
 extern bool uprobe_deny_signal(void);
 extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
 extern void uprobe_clear_state(struct mm_struct *mm);
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
 #else /* !CONFIG_UPROBES */
 struct uprobes_state {
 };
kernel/events/uprobes.c

@@ -245,12 +245,12 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t
  * the architecture. If an arch has variable length instruction and the
  * breakpoint instruction is not of the smallest length instruction
  * supported by that architecture then we need to modify is_trap_at_addr and
- * write_opcode accordingly. This would never be a problem for archs that
- * have fixed length instructions.
+ * uprobe_write_opcode accordingly. This would never be a problem for archs
+ * that have fixed length instructions.
  */
 
 /*
- * write_opcode - write the opcode at a given virtual address.
+ * uprobe_write_opcode - write the opcode at a given virtual address.
  * @mm: the probed process address space.
  * @vaddr: the virtual address to store the opcode.
  * @opcode: opcode to be written at @vaddr.
@@ -261,7 +261,7 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t
  * For mm @mm, write the opcode at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
+int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
 			uprobe_opcode_t opcode)
 {
 	struct page *old_page, *new_page;
@@ -315,7 +315,7 @@ static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
  */
 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-	return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }
 
 /**
@@ -330,7 +330,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
 int __weak
 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-	return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -577,7 +577,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
 	if (ret)
 		goto out;
 
-	/* write_opcode() assumes we don't cross page boundary */
+	/* uprobe_write_opcode() assumes we don't cross page boundary */
 	BUG_ON((uprobe->offset & ~PAGE_MASK) +
 			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
 
@@ -1264,7 +1264,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 		return 0;
 
 	/* Initialize the slot */
-	copy_to_page(area->page, xol_vaddr, uprobe->arch.insn, MAX_UINSN_BYTES);
+	copy_to_page(area->page, xol_vaddr,
+			uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
 	/*
 	 * We probably need flush_icache_user_range() but it needs vma.
 	 * This should work on supported architectures too.
@@ -1941,9 +1942,4 @@ static int __init init_uprobes(void)
 
 	return register_die_notifier(&uprobe_exception_nb);
 }
-module_init(init_uprobes);
-
-static void __exit exit_uprobes(void)
-{
-}
-module_exit(exit_uprobes);
+__initcall(init_uprobes);
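
A practical consequence of the rename is that uprobe_write_opcode() is no longer a static helper but is declared in include/linux/uprobes.h, so an architecture that overrides the __weak set_swbp()/set_orig_insn() can still reuse the generic copy-on-write page patching instead of duplicating it. A minimal, purely illustrative override (NEWARCH_UPROBE_SWBP_INSN is an invented constant standing in for the arch's breakpoint encoding):

#include <linux/uprobes.h>

/* Illustrative only: an arch-specific breakpoint writer reusing the
 * now-exported generic patching path. */
int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
	     unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, NEWARCH_UPROBE_SWBP_INSN);
}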