Commit e32cf5df authored by Eric W. Biederman

kthread: Generalize pf_io_worker so it can point to struct kthread

The point of using set_child_tid to hold the kthread pointer was that
it already did what is necessary.  There are now restrictions on when
set_child_tid can be initialized and when set_child_tid can be used in
schedule_tail.  Which indicates that continuing to use set_child_tid
to hold the kthread pointer is a bad idea.

Instead of continuing to use the set_child_tid field of task_struct
generalize the pf_io_worker field of task_struct and use it to hold
the kthread pointer.

Rename pf_io_worker (which is a void * pointer) to worker_private so
it can be used to store a kthread's struct kthread pointer.  Update the
kthread code to store the kthread pointer in the worker_private field.
Remove the places where set_child_tid had to be dealt with carefully
because kthreads also used it.

Link: https://lkml.kernel.org/r/CAHk-=wgtFAA9SbVYg0gR1tqPMC17-NYcs0GQkaYg1bGhh1uJQQ@mail.gmail.com
Link: https://lkml.kernel.org/r/87a6grvqy8.fsf_-_@email.froward.int.ebiederm.org
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
parent 00580f03
@@ -657,7 +657,7 @@ static int io_wqe_worker(void *data)
  */
 void io_wq_worker_running(struct task_struct *tsk)
 {
-	struct io_worker *worker = tsk->pf_io_worker;
+	struct io_worker *worker = tsk->worker_private;
 
 	if (!worker)
 		return;
@@ -675,7 +675,7 @@ void io_wq_worker_running(struct task_struct *tsk)
  */
 void io_wq_worker_sleeping(struct task_struct *tsk)
 {
-	struct io_worker *worker = tsk->pf_io_worker;
+	struct io_worker *worker = tsk->worker_private;
 
 	if (!worker)
 		return;
@@ -694,7 +694,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
 {
-	tsk->pf_io_worker = worker;
+	tsk->worker_private = worker;
 	worker->task = tsk;
 	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
 	tsk->flags |= PF_NO_SETAFFINITY;
......
@@ -200,6 +200,6 @@ static inline void io_wq_worker_running(struct task_struct *tsk)
 static inline bool io_wq_current_is_worker(void)
 {
 	return in_task() && (current->flags & PF_IO_WORKER) &&
-		current->pf_io_worker;
+		current->worker_private;
 }
 #endif
@@ -987,8 +987,8 @@ struct task_struct {
 	/* CLONE_CHILD_CLEARTID: */
 	int __user			*clear_child_tid;
 
-	/* PF_IO_WORKER */
-	void				*pf_io_worker;
+	/* PF_KTHREAD | PF_IO_WORKER */
+	void				*worker_private;
 
 	u64				utime;
 	u64				stime;
......
@@ -950,7 +950,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->splice_pipe = NULL;
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
-	tsk->pf_io_worker = NULL;
+	tsk->worker_private = NULL;
 
 	account_kernel_stack(tsk, 1);
@@ -2032,12 +2032,6 @@ static __latent_entropy struct task_struct *copy_process(
 		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
 	}
 
-	/*
-	 * This _must_ happen before we call free_task(), i.e. before we jump
-	 * to any of the bad_fork_* labels. This is to avoid freeing
-	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
-	 * kernel threads (PF_KTHREAD).
-	 */
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
 	/*
 	 * Clear TID on mm_release()?
......
@@ -72,7 +72,7 @@ enum KTHREAD_BITS {
 static inline struct kthread *to_kthread(struct task_struct *k)
 {
 	WARN_ON(!(k->flags & PF_KTHREAD));
-	return (__force void *)k->set_child_tid;
+	return k->worker_private;
 }
 
 /*
/* /*
@@ -80,7 +80,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
  *
  * Per construction; when:
  *
- *   (p->flags & PF_KTHREAD) && p->set_child_tid
+ *   (p->flags & PF_KTHREAD) && p->worker_private
  *
  * the task is both a kthread and struct kthread is persistent. However
  * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
@@ -88,7 +88,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
  */
 static inline struct kthread *__to_kthread(struct task_struct *p)
 {
-	void *kthread = (__force void *)p->set_child_tid;
+	void *kthread = p->worker_private;
 	if (kthread && !(p->flags & PF_KTHREAD))
 		kthread = NULL;
 	return kthread;
@@ -109,11 +109,7 @@ bool set_kthread_struct(struct task_struct *p)
 	init_completion(&kthread->parked);
 	p->vfork_done = &kthread->exited;
 
-	/*
-	 * We abuse ->set_child_tid to avoid the new member and because it
-	 * can't be wrongly copied by copy_process().
-	 */
-	p->set_child_tid = (__force void __user *)kthread;
+	p->worker_private = kthread;
 	return true;
 }
@@ -128,7 +124,7 @@ void free_kthread_struct(struct task_struct *k)
 #ifdef CONFIG_BLK_CGROUP
 	WARN_ON_ONCE(kthread && kthread->blkcg_css);
 #endif
-	k->set_child_tid = (__force void __user *)NULL;
+	k->worker_private = NULL;
 	kfree(kthread);
 }
......
@@ -4908,7 +4908,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	finish_task_switch(prev);
 	preempt_enable();
 
-	if (!(current->flags & PF_KTHREAD) && current->set_child_tid)
+	if (current->set_child_tid)
 		put_user(task_pid_vnr(current), current->set_child_tid);
 
 	calculate_sigpending();
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment