Kirill Smelkov / linux / Commits

Commit e46bc9b6, authored Apr 07, 2011 by Oleg Nesterov
Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into ptrace
Parents: 2b9accbe 321fb561
Showing 6 changed files with 442 additions and 180 deletions:
	fs/exec.c                  +1    -0
	include/linux/sched.h      +14   -3
	include/linux/tracehook.h  +0    -27
	kernel/exit.c              +67   -17
	kernel/ptrace.c            +81   -37
	kernel/signal.c            +279  -96
fs/exec.c
@@ -1659,6 +1659,7 @@ static int zap_process(struct task_struct *start, int exit_code)
	t = start;
	do {
		task_clear_group_stop_pending(t);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
include/linux/sched.h
@@ -653,9 +653,8 @@ struct signal_struct {
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
@@ -1261,6 +1260,7 @@ struct task_struct {
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
@@ -1777,6 +1777,17 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

/*
 * task->group_stop flags
 */
#define GROUP_STOP_SIGMASK	0xffff		/* signr of the last group stop */
#define GROUP_STOP_PENDING	(1 << 16)	/* task should stop for group stop */
#define GROUP_STOP_CONSUME	(1 << 17)	/* consume group stop count */
#define GROUP_STOP_TRAPPING	(1 << 18)	/* switching from STOPPED to TRACED */
#define GROUP_STOP_DEQUEUED	(1 << 19)	/* stop signal dequeued */

extern void task_clear_group_stop_pending(struct task_struct *task);

#ifdef CONFIG_PREEMPT_RCU
#define RCU_READ_UNLOCK_BLOCKED	(1 << 0)	/* blocked while in RCU read-side. */
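As an aside (not part of the diff): the new task->group_stop word keeps the number of the most recent group-stop signal in its low 16 bits and the state flags in the bits above it. Hypothetical helpers for pulling the two apart, assuming only the GROUP_STOP_* definitions and struct task_struct shown above, could look like this sketch:

	/* Illustrative sketch only; these helpers are not part of this commit. */
	static inline int group_stop_signr(struct task_struct *task)
	{
		/* low 16 bits hold the signr of the last group stop */
		return task->group_stop & GROUP_STOP_SIGMASK;
	}

	static inline bool group_stop_is_pending(struct task_struct *task)
	{
		/* bit 16 is set while the task still has to stop for the group stop */
		return (task->group_stop & GROUP_STOP_PENDING) != 0;
	}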
include/linux/tracehook.h
@@ -468,33 +468,6 @@ static inline int tracehook_get_signal(struct task_struct *task,
	return 0;
}

/**
 * tracehook_notify_jctl - report about job control stop/continue
 * @notify:	zero, %CLD_STOPPED or %CLD_CONTINUED
 * @why:	%CLD_STOPPED or %CLD_CONTINUED
 *
 * This is called when we might call do_notify_parent_cldstop().
 *
 * @notify is zero if we would not ordinarily send a %SIGCHLD,
 * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
 *
 * @why is %CLD_STOPPED when about to stop for job control;
 * we are already in %TASK_STOPPED state, about to call schedule().
 * It might also be that we have just exited (check %PF_EXITING),
 * but need to report that a group-wide stop is complete.
 *
 * @why is %CLD_CONTINUED when waking up after job control stop and
 * ready to make a delayed @notify report.
 *
 * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal.
 *
 * Called with the siglock held.
 */
static inline int tracehook_notify_jctl(int notify, int why)
{
	return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
}

/**
 * tracehook_finish_jctl - report about return from job control stop
 *
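A side note on the removed helper: its return statement uses the GNU "?:" extension, where "a ?: b" evaluates to a when a is non-zero and to b otherwise, so it could equivalently have been written as:

	/* equivalent spelling of the removed return expression */
	if (notify)
		return notify;
	return (current->ptrace & PT_PTRACED) ? why : 0;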
kernel/exit.c
@@ -1538,33 +1538,83 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
		return 0;
	}

	if (likely(!ptrace) && unlikely(task_ptrace(p))) {
	/* dead body doesn't have much to contribute */
	if (p->exit_state == EXIT_DEAD)
		return 0;

	/* slay zombie? */
	if (p->exit_state == EXIT_ZOMBIE) {
		/*
		 * This child is hidden by ptrace.
		 * We aren't allowed to see it now, but eventually we will.
		 * A zombie ptracee is only visible to its ptracer.
		 * Notification and reaping will be cascaded to the real
		 * parent when the ptracer detaches.
		 */
		if (likely(!ptrace) && unlikely(task_ptrace(p))) {
			/* it will become visible, clear notask_error */
			wo->notask_error = 0;
			return 0;
		}

	if (p->exit_state == EXIT_DEAD)
		return 0;

		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p))
			return wait_task_zombie(wo, p);

		/*
		 * We don't reap group leaders with subthreads.
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in finite
		 * amount time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * If @p is ptraced by a task in its real parent's group,
		 * hide group stop/continued state when looking at @p as
		 * the real parent; otherwise, a single stop can be
		 * reported twice as group and ptrace stops.
		 *
		 * If a ptracer wants to distinguish the two events for its
		 * own children, it should create a separate process which
		 * takes the role of real parent.
		 */
	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
		return wait_task_zombie(wo, p);

		if (likely(!ptrace) && task_ptrace(p) &&
		    same_thread_group(p->parent, p->real_parent))
			return 0;

		/*
		 * It's stopped or running now, so it might
		 * later continue, exit, or stop again.
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	if (task_stopped_code(p, ptrace))
		return wait_task_stopped(wo, ptrace, p);

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}
kernel/ptrace.c
@@ -37,35 +37,33 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
	child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If the group stop is completed or in progress,
		 * this thread was already counted as stopped.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			__set_task_state(child, TASK_STOPPED);
		else
			signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Must be called with the tasklist lock write-held.
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
@@ -75,8 +73,27 @@ void __ptrace_unlink(struct task_struct *child)
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	if (task_is_traced(child))
		ptrace_untrace(child);
	spin_lock(&child->sighand->siglock);

	/*
	 * Reinstate GROUP_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count))
		child->group_stop |= GROUP_STOP_PENDING;

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
		signal_wake_up(child, task_is_traced(child));

	spin_unlock(&child->sighand->siglock);
}

/*
@@ -95,16 +112,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		WARN_ON_ONCE(task_is_stopped(child));
		if (task_is_traced(child) || kill)
			ret = 0;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
@@ -168,6 +183,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
static int ptrace_attach(struct task_struct *task)
{
	bool wait_trap = false;
	int retval;

	audit_ptrace(task);
@@ -207,12 +223,42 @@ static int ptrace_attach(struct task_struct *task)
	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task)) {
		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
		signal_wake_up(task, 1);
		wait_trap = true;
	}

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (wait_trap)
		wait_event(current->signal->wait_chldexit,
			   !(task->group_stop & GROUP_STOP_TRAPPING));
	return retval;
}
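To make the STOPPED -> TRACED transition described in the hunk above concrete, here is a small userspace sketch (not part of this commit) that attaches to a child which is already in group stop and waits for it to report to the new tracer; the sleep-based synchronization and minimal error handling are illustrative assumptions only.

	/* Illustrative userspace demo; not kernel code from this commit. */
	#include <stdio.h>
	#include <unistd.h>
	#include <signal.h>
	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			raise(SIGSTOP);		/* child: enter group stop */
			for (;;)
				pause();
		}

		sleep(1);			/* crude wait for the child to stop */

		/* attach to the already-stopped child; it re-traps into TASK_TRACED */
		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
			perror("PTRACE_ATTACH");
			return 1;
		}

		int status;
		if (waitpid(pid, &status, 0) == pid && WIFSTOPPED(status))
			printf("tracee trapped, stop signal %d\n", WSTOPSIG(status));

		ptrace(PTRACE_DETACH, pid, NULL, 0);
		kill(pid, SIGKILL);
		return 0;
	}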
@@ -315,8 +361,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
		if (!child->exit_state)
			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
	}
	write_unlock_irq(&tasklist_lock);
kernel/signal.c

(This diff is collapsed in the page view and not reproduced here; per the summary above it adds 279 lines and removes 96.)