Commit ad4e25a3 authored by Paul E. McKenney

Merge branches 'doc.2017.10.20a', 'fixes.2017.10.19a', 'stall.2017.10.09a' and 'torture.2017.10.09a' into HEAD

doc.2017.10.20a: Documentation updates.
fixes.2017.10.19a: Miscellaneous fixes.
stall.2017.10.09a: RCU CPU stall-warning updates.
torture.2017.10.09a: Torture-test updates.
Documentation/admin-guide/kernel-parameters.txt

@@ -3539,6 +3539,9 @@
 	rcutorture.stall_cpu_holdoff= [KNL]
 			Time to wait (s) after boot before inducing stall.
 
+	rcutorture.stall_cpu_irqsoff= [KNL]
+			Disable interrupts while stalling if set.
+
 	rcutorture.stat_interval= [KNL]
 			Time (s) between statistics printk()s.
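The new rcutorture.stall_cpu_irqsoff parameter is used together with the existing stall knobs; for example, booting with rcutorture.stall_cpu=22 rcutorture.stall_cpu_irqsoff=1 (an illustrative combination) makes the stall task spin with interrupts disabled rather than merely with preemption disabled, exercising the irq-disabled leg of the new stall-warning diagnostics.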
include/linux/irq_work.h

@@ -33,10 +33,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
 
 bool irq_work_queue(struct irq_work *work);
-
-#ifdef CONFIG_SMP
 bool irq_work_queue_on(struct irq_work *work, int cpu);
-#endif
 
 void irq_work_tick(void);
 void irq_work_sync(struct irq_work *work);
kernel/irq_work.c

@@ -56,7 +56,6 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-#ifdef CONFIG_SMP
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
@@ -68,6 +67,8 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
+#ifdef CONFIG_SMP
+
 	/* Arch remote IPI send/receive backend aren't NMI safe */
 	WARN_ON_ONCE(in_nmi());
 
@@ -78,10 +79,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
+#else /* #ifdef CONFIG_SMP */
+	irq_work_queue(work);
+#endif /* #else #ifdef CONFIG_SMP */
 	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue_on);
-#endif
 
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
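With the #ifdef now inside irq_work_queue_on() itself, callers no longer need their own CONFIG_SMP guards: on !SMP kernels the function simply degenerates to irq_work_queue() on the local CPU. A minimal caller sketch (the handler and function names here are hypothetical, not part of the patch):

	#include <linux/irq_work.h>
	#include <linux/printk.h>
	#include <linux/smp.h>

	static void sample_handler(struct irq_work *iwp)
	{
		pr_info("irq_work ran on CPU %d\n", smp_processor_id());
	}

	static DEFINE_IRQ_WORK(sample_work, sample_handler);

	static void poke_cpu(int cpu)
	{
		irq_work_queue_on(&sample_work, cpu); /* no #ifdef CONFIG_SMP needed */
	}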
kernel/rcu/rcu.h

@@ -203,6 +203,21 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 extern int rcu_cpu_stall_suppress;
 int rcu_jiffies_till_stall_check(void);
 
+#define rcu_ftrace_dump_stall_suppress() \
+do { \
+	if (!rcu_cpu_stall_suppress) \
+		rcu_cpu_stall_suppress = 3; \
+} while (0)
+
+#define rcu_ftrace_dump_stall_unsuppress() \
+do { \
+	if (rcu_cpu_stall_suppress == 3) \
+		rcu_cpu_stall_suppress = 0; \
+} while (0)
+
+#else /* #endif #ifdef CONFIG_RCU_STALL_COMMON */
+#define rcu_ftrace_dump_stall_suppress()
+#define rcu_ftrace_dump_stall_unsuppress()
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
 
 /*
@@ -220,8 +235,12 @@ do { \
 	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
 	\
 	if (!atomic_read(&___rfd_beenhere) && \
-	    !atomic_xchg(&___rfd_beenhere, 1)) \
+	    !atomic_xchg(&___rfd_beenhere, 1)) { \
+		tracing_off(); \
+		rcu_ftrace_dump_stall_suppress(); \
 		ftrace_dump(oops_dump_mode); \
+		rcu_ftrace_dump_stall_unsuppress(); \
+	} \
 } while (0)
 
 void rcu_early_boot_tests(void);
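Two details worth noting in rcu_ftrace_dump(): the ___rfd_beenhere gate guarantees the expensive ftrace_dump() runs at most once per boot even if several CPUs race into the macro, and the sentinel value 3 lets the dump path suppress RCU CPU stall warnings temporarily without clobbering a user-requested rcu_cpu_stall_suppress=1 (only the value it set itself, 3, is cleared back to 0). A userspace analogue of the one-shot gate, for illustration only:

	#include <stdatomic.h>
	#include <stdio.h>

	static void dump_once(void)
	{
		static atomic_int beenhere;

		/* The first caller reads 0 from the exchange and wins;
		 * every later or racing caller reads 1 and falls through. */
		if (!atomic_load(&beenhere) &&
		    !atomic_exchange(&beenhere, 1))
			printf("dumping (exactly one caller gets here)\n");
	}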
kernel/rcu/rcu_segcblist.c

@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 
 #include "rcu_segcblist.h"
kernel/rcu/rcutorture.c

@@ -51,6 +51,7 @@
 #include <asm/byteorder.h>
 #include <linux/torture.h>
 #include <linux/vmalloc.h>
+#include <linux/sched/debug.h>
 
 #include "rcu.h"
@@ -89,6 +90,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
 torture_param(int, stall_cpu_holdoff, 10,
 	      "Time to wait before starting stall (s).");
+torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
 torture_param(int, stat_interval, 60,
 	      "Number of seconds between stats printk()s");
 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
@@ -1239,6 +1241,7 @@ rcu_torture_stats_print(void)
 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 	static unsigned long rtcv_snap = ULONG_MAX;
+	static bool splatted;
 	struct task_struct *wtp;
 
 	for_each_possible_cpu(cpu) {
@@ -1324,6 +1327,10 @@ rcu_torture_stats_print(void)
 			 gpnum, completed, flags,
 			 wtp == NULL ? ~0UL : wtp->state,
 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
+		if (!splatted && wtp) {
+			sched_show_task(wtp);
+			splatted = true;
+		}
 		show_rcu_gp_kthreads();
 		rcu_ftrace_dump(DUMP_ALL);
 	}
@@ -1357,7 +1364,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
 		 "test_boost=%d/%d test_boost_interval=%d "
 		 "test_boost_duration=%d shutdown_secs=%d "
-		 "stall_cpu=%d stall_cpu_holdoff=%d "
+		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
 		 "n_barrier_cbs=%d "
 		 "onoff_interval=%d onoff_holdoff=%d\n",
 		 torture_type, tag, nrealreaders, nfakewriters,
@@ -1365,7 +1372,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
 		 test_boost, cur_ops->can_boost,
 		 test_boost_interval, test_boost_duration, shutdown_secs,
-		 stall_cpu, stall_cpu_holdoff,
+		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
 		 n_barrier_cbs,
 		 onoff_interval, onoff_holdoff);
 }
@@ -1430,12 +1437,19 @@ static int rcu_torture_stall(void *args)
 	if (!kthread_should_stop()) {
 		stop_at = get_seconds() + stall_cpu;
 		/* RCU CPU stall is expected behavior in following code. */
-		pr_alert("rcu_torture_stall start.\n");
 		rcu_read_lock();
-		preempt_disable();
+		if (stall_cpu_irqsoff)
+			local_irq_disable();
+		else
+			preempt_disable();
+		pr_alert("rcu_torture_stall start on CPU %d.\n",
+			 smp_processor_id());
 		while (ULONG_CMP_LT(get_seconds(), stop_at))
 			continue;  /* Induce RCU CPU stall warning. */
-		preempt_enable();
+		if (stall_cpu_irqsoff)
+			local_irq_enable();
+		else
+			preempt_enable();
 		rcu_read_unlock();
 		pr_alert("rcu_torture_stall end.\n");
 	}
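Note that the start-of-stall pr_alert() now executes only after preemption (or interrupts) have been disabled, so the smp_processor_id() it prints necessarily names the CPU that actually stalls. When rcutorture is built as a module, something like modprobe rcutorture stall_cpu=22 stall_cpu_irqsoff=1 (an illustrative invocation) exercises the same path.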
kernel/rcu/tree.c (diff collapsed in this view)
kernel/rcu/tree.h

@@ -103,6 +103,7 @@ struct rcu_node {
 					/*  Online CPUs for next expedited GP. */
 					/*  Any CPU that has ever been online will */
 					/*  have its bit set. */
+	unsigned long ffmask;		/* Fully functional CPUs. */
 	unsigned long grpmask;		/* Mask to apply to parent qsmask. */
 					/*  Only one bit will be set in this mask. */
 	int	grplo;			/* lowest-numbered CPU or group here. */
@@ -285,6 +286,10 @@ struct rcu_data {
 	/* 8) RCU CPU stall data. */
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
+	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
+	struct irq_work rcu_iw;		/* Check for non-irq activity. */
+	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
+	unsigned long rcu_iw_gpnum;	/* ->gpnum associated with ->rcu_iw. */
 
 	int cpu;
 	struct rcu_state *rsp;
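These ->rcu_iw* fields are driven by the collapsed kernel/rcu/tree.c diff: when a grace period runs long, the forcing code posts rdp->rcu_iw on the suspect CPU via irq_work_queue_on(), and the handler records the grace-period number under the leaf rcu_node lock, proving the CPU is still taking interrupts. A sketch of that handler, reconstructed from the same patch series rather than visible above:

	static void rcu_iw_handler(struct irq_work *iwp)
	{
		struct rcu_data *rdp;
		struct rcu_node *rnp;

		rdp = container_of(iwp, struct rcu_data, rcu_iw);
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
			rdp->rcu_iw_gpnum = rnp->gpnum;
			rdp->rcu_iw_pending = false;
		}
		raw_spin_unlock_rcu_node(rnp);
	}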
kernel/rcu/tree_plugin.h

@@ -54,6 +54,7 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
  * This probably needs to be excluded from -rt builds.
  */
 #define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
+#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -530,7 +531,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		/* Unboost if we were boosted. */
 		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-			rt_mutex_unlock(&rnp->boost_mtx);
+			rt_mutex_futex_unlock(&rnp->boost_mtx);
 
 		/*
 		 * If this was the last task on the expedited lists,
@@ -911,8 +912,6 @@ void exit_rcu(void)
 
 #ifdef CONFIG_RCU_BOOST
 
-#include "../locking/rtmutex_common.h"
-
 static void rcu_wake_cond(struct task_struct *t, int status)
 {
 	/*
@@ -1507,7 +1506,7 @@ static void rcu_prepare_for_idle(void)
 	rdtp->last_accelerate = jiffies;
 	for_each_rcu_flavor(rsp) {
 		rdp = this_cpu_ptr(rsp->rda);
-		if (rcu_segcblist_pend_cbs(&rdp->cblist))
+		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
 			continue;
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
@@ -1671,6 +1670,7 @@ static void print_cpu_stall_info_begin(void)
  */
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 {
+	unsigned long delta;
 	char fast_no_hz[72];
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_dynticks *rdtp = rdp->dynticks;
@@ -1685,11 +1685,15 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 		ticks_value = rsp->gpnum - rdp->gpnum;
 	}
 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-	pr_err("\t%d-%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
+	delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
+	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
 	       cpu,
 	       "O."[!!cpu_online(cpu)],
 	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
 	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
+	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
+			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
+			"!."[!delta],
 	       ticks_value, ticks_title,
 	       rcu_dynticks_snap(rdtp) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
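The new fourth character in the per-CPU stall report encodes the irq_work diagnostic: '?' when the kernel lacks CONFIG_IRQ_WORK, a digit 0-9 giving the (capped) number of grace periods for which ->rcu_iw has been pending unserviced (suggesting the CPU has interrupts disabled), '.' when the handler ran during the current grace period, and '!' when it last ran during an earlier grace period or has not been posted. The same selection logic pulled out as a helper, purely for readability (not part of the patch):

	static char rcu_iw_char(struct rcu_data *rdp, unsigned long delta)
	{
		if (!IS_ENABLED(CONFIG_IRQ_WORK))
			return '?';
		if (rdp->rcu_iw_pending)
			return (int)min(delta, 9UL) + '0';
		return "!."[!delta];
	}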
kernel/rcu/update.c

@@ -494,6 +494,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #endif
 
 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
+EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
 static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 module_param(rcu_cpu_stall_suppress, int, 0644);
@@ -575,7 +576,6 @@ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);
 
-static void rcu_spawn_tasks_kthread(void);
 static struct task_struct *rcu_tasks_kthread_ptr;
 
 /**
@@ -600,7 +600,6 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
 	unsigned long flags;
 	bool needwake;
-	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);
 
 	rhp->next = NULL;
 	rhp->func = func;
@@ -610,11 +609,8 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 	rcu_tasks_cbs_tail = &rhp->next;
 	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
 	/* We can't create the thread unless interrupts are enabled. */
-	if ((needwake && havetask) ||
-	    (!havetask && !irqs_disabled_flags(flags))) {
-		rcu_spawn_tasks_kthread();
+	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
 		wake_up(&rcu_tasks_cbs_wq);
-	}
 }
 EXPORT_SYMBOL_GPL(call_rcu_tasks);
@@ -853,27 +849,18 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	}
 }
 
-/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
-static void rcu_spawn_tasks_kthread(void)
+/* Spawn rcu_tasks_kthread() at core_initcall() time. */
+static int __init rcu_spawn_tasks_kthread(void)
 {
-	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
 	struct task_struct *t;
 
-	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
-		smp_mb(); /* Ensure caller sees full kthread. */
-		return;
-	}
-	mutex_lock(&rcu_tasks_kthread_mutex);
-	if (rcu_tasks_kthread_ptr) {
-		mutex_unlock(&rcu_tasks_kthread_mutex);
-		return;
-	}
 	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
 	BUG_ON(IS_ERR(t));
 	smp_mb(); /* Ensure others see full kthread. */
 	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
-	mutex_unlock(&rcu_tasks_kthread_mutex);
+	return 0;
 }
+core_initcall(rcu_spawn_tasks_kthread);
 
 /* Do the srcu_read_lock() for the above synchronize_srcu(). */
 void exit_tasks_rcu_start(void)
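Creating the tasks-RCU kthread at core_initcall() time removes the interrupts-enabled check and double-checked mutex dance above: a very early call_rcu_tasks() now just queues its callback, which waits until the kthread starts. Callers are unchanged; a sketch for illustration (struct and function names hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct sample_obj {
		struct rcu_head rh;
		/* ... payload ... */
	};

	static void sample_free_cb(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct sample_obj, rh));
	}

	static void sample_defer_free(struct sample_obj *p)
	{
		/* Safe even early in boot: the callback simply sits on the
		 * list until rcu_tasks_kthread spawns at core_initcall(). */
		call_rcu_tasks(&p->rh, sample_free_cb);
	}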
kernel/sched/core.c

@@ -505,8 +505,7 @@ void resched_cpu(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-		return;
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	resched_curr(rq);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -4842,6 +4841,7 @@ int __sched _cond_resched(void)
 		preempt_schedule_common();
 		return 1;
 	}
+	rcu_all_qs();
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
@@ -5165,6 +5165,7 @@ void sched_show_task(struct task_struct *p)
 	show_stack(p, NULL);
 	put_task_stack(p);
 }
+EXPORT_SYMBOL_GPL(sched_show_task);
 
 static inline bool
 state_filter_match(unsigned long state_filter, struct task_struct *p)
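The rcu_all_qs() on _cond_resched()'s fall-through path means that on CONFIG_PREEMPT=n kernels a long-running loop that calls cond_resched() reports an RCU quiescent state even when no reschedule is needed, heading off stall warnings without waiting for the next scheduler tick. For example (an illustrative loop; process() and struct item are hypothetical):

	static void crunch_many(struct item *items, long n)
	{
		long i;

		for (i = 0; i < n; i++) {
			process(&items[i]);
			cond_resched(); /* yields if needed, else rcu_all_qs() */
		}
	}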
tools/testing/selftests/rcutorture/bin/config_override.sh

@@ -42,7 +42,7 @@ else
 	exit 1
 fi
 
-T=/tmp/config_override.sh.$$
+T=${TMPDIR-/tmp}/config_override.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
tools/testing/selftests/rcutorture/bin/configcheck.sh

@@ -19,7 +19,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=/tmp/abat-chk-config.sh.$$
+T=${TMPDIR-/tmp}/abat-chk-config.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
tools/testing/selftests/rcutorture/bin/configinit.sh

@@ -32,7 +32,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=/tmp/configinit.sh.$$
+T=${TMPDIR-/tmp}/configinit.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
tools/testing/selftests/rcutorture/bin/kvm-build.sh

@@ -35,7 +35,7 @@ then
 	exit 1
 fi
 
-T=/tmp/test-linux.sh.$$
+T=${TMPDIR-/tmp}/test-linux.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh

@@ -38,7 +38,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=/tmp/kvm-test-1-run.sh.$$
+T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
tools/testing/selftests/rcutorture/bin/kvm.sh

@@ -30,7 +30,7 @@
 scriptname=$0
 args="$*"
 
-T=/tmp/kvm.sh.$$
+T=${TMPDIR-/tmp}/kvm.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
@@ -222,7 +222,7 @@ do
 		exit 1
 	fi
 done
-sort -k2nr $T/cfgcpu > $T/cfgcpu.sort
+sort -k2nr $T/cfgcpu -T="$T" > $T/cfgcpu.sort
 
 # Use a greedy bin-packing algorithm, sorting the list accordingly.
 awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
tools/testing/selftests/rcutorture/bin/parse-build.sh

@@ -28,7 +28,7 @@
 F=$1
 title=$2
 
-T=/tmp/parse-build.sh.$$
+T=${TMPDIR-/tmp}/parse-build.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
tools/testing/selftests/rcutorture/bin/parse-torture.sh

@@ -27,7 +27,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=/tmp/parse-torture.sh.$$
+T=${TMPDIR-/tmp}/parse-torture.sh.$$
 
 file="$1"
 title="$2"
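All of the rcutorture scripts now honor a TMPDIR environment variable, defaulting to /tmp when it is unset, and kvm.sh also points sort's temporary files at the same scratch directory. Running, say, TMPDIR=/var/tmp kvm.sh keeps large intermediate files off a small /tmp filesystem.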