Commit 03c8a4a4 authored by Masami Hiramatsu, committed by Ingo Molnar

csky: kprobes: Use generic kretprobe trampoline handler

Use the generic kretprobe trampoline handler. Don't use
framepointer verification.
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/159870605775.1229682.2627276871589951304.stgit@devnote2
parent f75dd136
@@ -404,87 +404,14 @@ int __init arch_populate_kprobe_blacklist(void)
 void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
 {
-	struct kretprobe_instance *ri = NULL;
-	struct hlist_head *head, empty_rp;
-	struct hlist_node *tmp;
-	unsigned long flags, orig_ret_address = 0;
-	unsigned long trampoline_address =
-		(unsigned long)&kretprobe_trampoline;
-	kprobe_opcode_t *correct_ret_addr = NULL;
-
-	INIT_HLIST_HEAD(&empty_rp);
-	kretprobe_hash_lock(current, &head, &flags);
-
-	/*
-	 * It is possible to have multiple instances associated with a given
-	 * task either because multiple functions in the call path have
-	 * return probes installed on them, and/or more than one
-	 * return probe was registered for a target function.
-	 *
-	 * We can handle this because:
-	 *	- instances are always pushed into the head of the list
-	 *	- when multiple return probes are registered for the same
-	 *	  function, the (chronologically) first instance's ret_addr
-	 *	  will be the real return address, and all the rest will
-	 *	  point to kretprobe_trampoline.
-	 */
-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
-		if (ri->task != current)
-			/* another task is sharing our hash bucket */
-			continue;
-
-		orig_ret_address = (unsigned long)ri->ret_addr;
-
-		if (orig_ret_address != trampoline_address)
-			/*
-			 * This is the real return address. Any other
-			 * instances associated with this task are for
-			 * other calls deeper on the call stack
-			 */
-			break;
-	}
-
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
-	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
-		if (ri->task != current)
-			/* another task is sharing our hash bucket */
-			continue;
-
-		orig_ret_address = (unsigned long)ri->ret_addr;
-		if (ri->rp && ri->rp->handler) {
-			__this_cpu_write(current_kprobe, &ri->rp->kp);
-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-			ri->ret_addr = correct_ret_addr;
-			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, NULL);
-		}
-
-		recycle_rp_inst(ri, &empty_rp);
-
-		if (orig_ret_address != trampoline_address)
-			/*
-			 * This is the real return address. Any other
-			 * instances associated with this task are for
-			 * other calls deeper on the call stack
-			 */
-			break;
-	}
-
-	kretprobe_hash_unlock(current, &flags);
-
-	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-		hlist_del(&ri->hlist);
-		kfree(ri);
-	}
-
-	return (void *)orig_ret_address;
+	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
 }
 
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
 {
 	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
+	ri->fp = NULL;
 	regs->lr = (unsigned long) &kretprobe_trampoline;
 }
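For reference, a minimal sketch (not part of the commit itself) of how the two csky functions read once this patch is applied. It assumes the three-argument form of kretprobe_trampoline_handler() introduced by this series; passing NULL as the frame_pointer argument is what makes the generic handler skip framepointer verification on csky, matching the commit message.

/*
 * Sketch of the resulting csky code after this patch. The generic helper
 * walks the kretprobe instances for the current task, runs the user
 * handlers, and returns the original return address; NULL as the last
 * argument means no framepointer verification is performed.
 */
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs,
						    &kretprobe_trampoline, NULL);
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	/* Record the real return address and divert it to the trampoline. */
	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
	/* No framepointer verification on csky. */
	ri->fp = NULL;
	regs->lr = (unsigned long)&kretprobe_trampoline;
}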