Commit a658084b authored by Jiri Olsa, committed by Kelsey Skunberg

kretprobe: Prevent triggering kretprobe from within kprobe_flush_task

BugLink: https://bugs.launchpad.net/bugs/1885932

[ Upstream commit 9b38cc70 ]

Ziqian reported a lockup when adding a kretprobe on _raw_spin_lock_irqsave.
My test was also able to trigger lockdep output:

 ============================================
 WARNING: possible recursive locking detected
 5.6.0-rc6+ #6 Not tainted
 --------------------------------------------
 sched-messaging/2767 is trying to acquire lock:
 ffffffff9a492798 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_hash_lock+0x52/0xa0

 but task is already holding lock:
 ffffffff9a491a18 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_trampoline+0x0/0x50

 other info that might help us debug this:
  Possible unsafe locking scenario:

        CPU0
        ----
   lock(&(kretprobe_table_locks[i].lock));
   lock(&(kretprobe_table_locks[i].lock));

  *** DEADLOCK ***

  May be due to missing lock nesting notation

 1 lock held by sched-messaging/2767:
  #0: ffffffff9a491a18 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_trampoline+0x0/0x50

 stack backtrace:
 CPU: 3 PID: 2767 Comm: sched-messaging Not tainted 5.6.0-rc6+ #6
 Call Trace:
  dump_stack+0x96/0xe0
  __lock_acquire.cold.57+0x173/0x2b7
  ? native_queued_spin_lock_slowpath+0x42b/0x9e0
  ? lockdep_hardirqs_on+0x590/0x590
  ? __lock_acquire+0xf63/0x4030
  lock_acquire+0x15a/0x3d0
  ? kretprobe_hash_lock+0x52/0xa0
  _raw_spin_lock_irqsave+0x36/0x70
  ? kretprobe_hash_lock+0x52/0xa0
  kretprobe_hash_lock+0x52/0xa0
  trampoline_handler+0xf8/0x940
  ? kprobe_fault_handler+0x380/0x380
  ? find_held_lock+0x3a/0x1c0
  kretprobe_trampoline+0x25/0x50
  ? lock_acquired+0x392/0xbc0
  ? _raw_spin_lock_irqsave+0x50/0x70
  ? __get_valid_kprobe+0x1f0/0x1f0
  ? _raw_spin_unlock_irqrestore+0x3b/0x40
  ? finish_task_switch+0x4b9/0x6d0
  ? __switch_to_asm+0x34/0x70
  ? __switch_to_asm+0x40/0x70

The code within the kretprobe handler checks for probe reentrancy,
so we won't trigger any _raw_spin_lock_irqsave probe from within the
handler itself.
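
This protection works because the breakpoint handler refuses to invoke
a probe's handlers while another kprobe is already marked active on the
same CPU. A simplified sketch of that dispatch logic (modeled on x86's
kprobe_int3_handler/reenter_kprobe; abbreviated, not the exact kernel
code):

  /* Sketch only: how a probe hit is dispatched on a breakpoint. */
  static int kprobe_hit(struct kprobe *p, struct pt_regs *regs)
  {
          struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

          if (kprobe_running()) {
                  /*
                   * A kprobe is already active on this CPU, so this hit
                   * is treated as reentrant: handlers are not called and
                   * the hit is only counted as missed.  For a kretprobe
                   * that means the pre_handler which would install
                   * kretprobe_trampoline never runs.
                   */
                  kprobes_inc_nmissed_count(p);
                  return 1;
          }

          __this_cpu_write(current_kprobe, p);
          kcb->kprobe_status = KPROBE_HIT_ACTIVE;
          return p->pre_handler ? p->pre_handler(p, regs) : 0;
  }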

The problem is outside of the handler, in kprobe_flush_task, where we call:

  kprobe_flush_task
    kretprobe_table_lock
      raw_spin_lock_irqsave
        _raw_spin_lock_irqsave

where _raw_spin_lock_irqsave triggers the kretprobe and installs the
kretprobe_trampoline handler on _raw_spin_lock_irqsave's return.

The kretprobe_trampoline handler is then executed with
kretprobe_table_locks already locked, and the first thing it does is
lock kretprobe_table_locks again ;-) The whole lockup path looks like:

  kprobe_flush_task
    kretprobe_table_lock
      raw_spin_lock_irqsave
        _raw_spin_lock_irqsave ---> probe triggered, kretprobe_trampoline installed

        ---> kretprobe_table_locks locked

        kretprobe_trampoline
          trampoline_handler
            kretprobe_hash_lock(current, &head, &flags);  <--- deadlock

Add kprobe_busy_begin/end helpers that mark a section of code as having
a fake probe installed, preventing any other kprobe from triggering
within that section.

Use these helpers in kprobe_flush_task, so the probe recursion
protection check is hit and the probe is never installed, preventing the
above lockup.
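
The resulting pattern in kprobe_flush_task (see the hunks below) simply
brackets the section that takes kretprobe_table_locks with the dummy
probe:

  kprobe_busy_begin();    /* current_kprobe = &kprobe_busy, preemption off */

  /* kretprobe_table_lock() -> raw_spin_lock_irqsave(): a kretprobe on
   * _raw_spin_lock_irqsave now trips the recursion check and is skipped,
   * so kretprobe_trampoline is never installed from this section.
   */

  kprobe_busy_end();      /* current_kprobe = NULL, preemption back on */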

Link: http://lkml.kernel.org/r/158927059835.27680.7011202830041561604.stgit@devnote2

Fixes: ef53d9c5 ("kprobes: improve kretprobe scalability with hashed locking")
Cc: Ingo Molnar <mingo@kernel.org>
Cc: "Gustavo A . R . Silva" <gustavoars@kernel.org>
Cc: Anders Roxell <anders.roxell@linaro.org>
Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Reported-by: "Ziqian SUN (Zamir)" <zsun@redhat.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Kelsey Skunberg <kelsey.skunberg@canonical.com>
parent d34e7952
arch/x86/kernel/kprobes/core.c

@@ -738,16 +738,11 @@ static void __used kretprobe_trampoline_holder(void)
 NOKPROBE_SYMBOL(kretprobe_trampoline_holder);
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 
-static struct kprobe kretprobe_kprobe = {
-	.addr = (void *)kretprobe_trampoline,
-};
-
 /*
  * Called from kretprobe_trampoline
  */
 __visible __used void *trampoline_handler(struct pt_regs *regs)
 {
-	struct kprobe_ctlblk *kcb;
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
 	struct hlist_node *tmp;
@@ -757,16 +752,12 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
 	void *frame_pointer;
 	bool skipped = false;
 
-	preempt_disable();
-
 	/*
 	 * Set a dummy kprobe for avoiding kretprobe recursion.
 	 * Since kretprobe never run in kprobe handler, kprobe must not
 	 * be running at this point.
 	 */
-	kcb = get_kprobe_ctlblk();
-	__this_cpu_write(current_kprobe, &kretprobe_kprobe);
-	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	kprobe_busy_begin();
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -845,7 +836,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
 			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, &kretprobe_kprobe);
+			__this_cpu_write(current_kprobe, &kprobe_busy);
 		}
 
 		recycle_rp_inst(ri, &empty_rp);
@@ -861,8 +852,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
 
 	kretprobe_hash_unlock(current, &flags);
 
-	__this_cpu_write(current_kprobe, NULL);
-	preempt_enable();
+	kprobe_busy_end();
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
include/linux/kprobes.h

@@ -366,6 +366,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
 	return this_cpu_ptr(&kprobe_ctlblk);
 }
 
+extern struct kprobe kprobe_busy;
+void kprobe_busy_begin(void);
+void kprobe_busy_end(void);
+
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
 int register_kprobes(struct kprobe **kps, int num);
kernel/kprobes.c

@@ -1150,6 +1150,26 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
+struct kprobe kprobe_busy = {
+	.addr = (void *) get_kprobe,
+};
+
+void kprobe_busy_begin(void)
+{
+	struct kprobe_ctlblk *kcb;
+
+	preempt_disable();
+	__this_cpu_write(current_kprobe, &kprobe_busy);
+	kcb = get_kprobe_ctlblk();
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void kprobe_busy_end(void)
+{
+	__this_cpu_write(current_kprobe, NULL);
+	preempt_enable();
+}
+
 /*
  * This function is called from finish_task_switch when task tk becomes dead,
  * so that we can recycle any function-return probe instances associated
@@ -1167,6 +1187,8 @@ void kprobe_flush_task(struct task_struct *tk)
 		/* Early boot. kretprobe_table_locks not yet initialized. */
 		return;
 
+	kprobe_busy_begin();
+
 	INIT_HLIST_HEAD(&empty_rp);
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
@@ -1180,6 +1202,8 @@ void kprobe_flush_task(struct task_struct *tk)
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
+
+	kprobe_busy_end();
 }
 NOKPROBE_SYMBOL(kprobe_flush_task);