Commit 3545832f authored by Byungchul Park, committed by Paul E. McKenney

rcu: Change return type of rcu_spawn_one_boost_kthread()

The return value of rcu_spawn_one_boost_kthread() is no longer used.
This commit therefore changes its return type from int to void, removes
the now-unneeded casts to void from its callers, and reports a
kthread_create() failure with WARN_ON_ONCE() instead of returning an error.
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
parent 7e210a65
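
For readers less familiar with this idiom, here is a minimal userspace sketch (not kernel code; the names spawn_worker and start_workers are made up for illustration) of the same int-to-void conversion: once nothing reads a function's status return, the function can return void and report problems itself, and its callers can drop the (void) casts that only existed to mark the ignored result.

#include <stdio.h>

/*
 * Before: the helper returned a status that no caller checked, so each
 * call site carried a "(void)" cast:
 *
 *     static int spawn_worker(int id) { ... return 0; }
 *     (void)spawn_worker(i);
 *
 * After: the helper returns void and reports the failure itself,
 * analogous to the WARN_ON_ONCE() added in the commit above.
 */
static void spawn_worker(int id)
{
	if (id < 0) {
		fprintf(stderr, "spawn_worker: bad id %d\n", id); /* report, don't propagate */
		return;
	}
	printf("spawned worker %d\n", id);
}

static void start_workers(int n)
{
	for (int i = 0; i < n; i++)
		spawn_worker(i); /* no "(void)" cast needed any more */
}

int main(void)
{
	start_workers(3);
	return 0;
}
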
@@ -1123,7 +1123,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist. We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
+static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
 	int rnp_index = rnp - rcu_get_root();
 	unsigned long flags;
@@ -1131,25 +1131,27 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	struct task_struct *t;
 
 	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
-		return 0;
+		return;
 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
-		return 0;
+		return;
 	rcu_state.boost = 1;
 	if (rnp->boost_kthread_task != NULL)
-		return 0;
+		return;
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
 			   "rcub/%d", rnp_index);
-	if (IS_ERR(t))
-		return PTR_ERR(t);
+	if (WARN_ON_ONCE(IS_ERR(t)))
+		return;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-	return 0;
 }
 
 /*
@@ -1190,7 +1192,7 @@ static void __init rcu_spawn_boost_kthreads(void)
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rnp)
-		(void)rcu_spawn_one_boost_kthread(rnp);
+		rcu_spawn_one_boost_kthread(rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1200,7 +1202,7 @@ static void rcu_prepare_kthreads(int cpu)
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
 	if (rcu_scheduler_fully_active)
-		(void)rcu_spawn_one_boost_kthread(rnp);
+		rcu_spawn_one_boost_kthread(rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */