Commit 18952651 authored by Paul E. McKenney

Merge branches 'fixes1.2018.07.12b' and 'torture1.2018.07.12b' into HEAD

fixes1.2018.07.12b: Post-gp_seq miscellaneous fixes
torture1.2018.07.12b: Post-gp_seq torture-test updates
parents c7cd161e bf5b6435
@@ -3632,8 +3632,8 @@
 		Set time (s) after boot for CPU-hotplug testing.
 
 	rcutorture.onoff_interval= [KNL]
-		Set time (s) between CPU-hotplug operations, or
-		zero to disable CPU-hotplug testing.
+		Set time (jiffies) between CPU-hotplug operations,
+		or zero to disable CPU-hotplug testing.
 
 	rcutorture.shuffle_interval= [KNL]
 		Set task-shuffle interval (s). Shuffling tasks
...
@@ -64,6 +64,8 @@ struct torture_random_state {
 	long trs_count;
 };
 #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+	DEFINE_PER_CPU(struct torture_random_state, name)
 unsigned long torture_random(struct torture_random_state *trsp);
 
 /* Task shuffler, which causes CPUs to occasionally go idle. */
...
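The new DEFINE_TORTURE_RANDOM_PERCPU() helper simply wraps DEFINE_PER_CPU() around the existing torture_random_state. Below is a minimal sketch of how a torture module might use it, assuming kernel context; the names my_rand and my_random_byte are illustrative only and are not part of this commit.

#include <linux/percpu.h>
#include <linux/torture.h>

/* Illustrative only: one pseudo-random state per CPU, zero-initialized. */
static DEFINE_TORTURE_RANDOM_PERCPU(my_rand);

static unsigned long my_random_byte(void)
{
	unsigned long r;

	preempt_disable();	/* stay on one CPU while touching its state */
	r = torture_random(this_cpu_ptr(&my_rand)) & 0xff;
	preempt_enable();
	return r;
}

A per-CPU state avoids sharing (and thus cacheline bouncing) of the random-number state among concurrent torture kthreads.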
@@ -467,7 +467,6 @@ enum rcutorture_type {
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 			    unsigned long *gp_seq);
-void rcutorture_record_test_transition(void);
 void rcutorture_record_progress(unsigned long vernum);
 void do_trace_rcu_torture_read(const char *rcutorturename,
 			       struct rcu_head *rhp,
@@ -481,7 +480,6 @@ static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
 	*flags = 0;
 	*gp_seq = 0;
 }
-static inline void rcutorture_record_test_transition(void) { }
 static inline void rcutorture_record_progress(unsigned long vernum) { }
 #ifdef CONFIG_RCU_TRACE
 void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -527,9 +525,8 @@ static inline void rcu_force_quiescent_state(void) { }
 static inline void rcu_bh_force_quiescent_state(void) { }
 static inline void rcu_sched_force_quiescent_state(void) { }
 static inline void show_rcu_gp_kthreads(void) { }
+static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
 #else /* #ifdef CONFIG_TINY_RCU */
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
 unsigned long rcu_get_gp_seq(void);
 unsigned long rcu_bh_get_gp_seq(void);
 unsigned long rcu_sched_get_gp_seq(void);
@@ -537,6 +534,7 @@ unsigned long rcu_exp_batches_completed(void);
 unsigned long rcu_exp_batches_completed_sched(void);
 unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void show_rcu_gp_kthreads(void);
+int rcu_get_gp_kthreads_prio(void);
 void rcu_force_quiescent_state(void);
 void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
...
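The new rcu_get_gp_kthreads_prio() accessor, declared above with a stub returning 0 under CONFIG_TINY_RCU, lets torture code read the priority of RCU's grace-period kthreads. The fragment below is a hedged sketch of the kind of check this enables; check_boost_prio() and its boost_prio argument are assumptions for illustration, not code from this merge.

#include <linux/printk.h>

/* Warn if a would-be boost priority cannot outrank RCU's GP kthreads. */
static void check_boost_prio(int boost_prio)
{
	int gp_prio = rcu_get_gp_kthreads_prio();	/* 0 under CONFIG_TINY_RCU */

	if (gp_prio > 0 && boost_prio <= gp_prio)
		pr_alert("boost priority %d does not exceed GP kthread priority %d\n",
			 boost_prio, gp_prio);
}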
@@ -369,11 +369,6 @@ static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
 	return cur_ops->gp_diff(new, old);
 }
 
-static bool __maybe_unused torturing_tasks(void)
-{
-	return cur_ops == &tasks_ops;
-}
-
 /*
  * If performance tests complete, wait for shutdown to commence.
  */
...
This diff is collapsed.
@@ -183,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);
 
+/* Retrieve RCU kthreads priority for rcutorture */
+int rcu_get_gp_kthreads_prio(void)
+{
+	return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
 /*
  * Number of grace periods between delays, normalized by the duration of
  * the delay. The longer the delay, the more the grace periods between
@@ -194,18 +201,6 @@ module_param(gp_cleanup_delay, int, 0444);
  */
 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
 
-/*
- * Track the rcutorture test sequence number and the update version
- * number within a given test. The rcutorture_testseq is incremented
- * on every rcutorture module load and unload, so has an odd value
- * when a test is running. The rcutorture_vernum is set to zero
- * when rcutorture starts and is incremented on each rcutorture update.
- * These variables enable correlating rcutorture output with the
- * RCU tracing information.
- */
-unsigned long rcutorture_testseq;
-unsigned long rcutorture_vernum;
-
 /*
  * Compute the mask of online CPUs for the specified rcu_node structure.
  * This will not be stable unless the rcu_node structure's ->lock is
@@ -667,20 +662,6 @@ void show_rcu_gp_kthreads(void)
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 
-/*
- * Record the number of times rcutorture tests have been initiated and
- * terminated. This information allows the debugfs tracing stats to be
- * correlated to the rcutorture messages, even when the rcutorture module
- * is being repeatedly loaded and unloaded. In other words, we cannot
- * store this state in rcutorture itself.
- */
-void rcutorture_record_test_transition(void)
-{
-	rcutorture_testseq++;
-	rcutorture_vernum = 0;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
-
 /*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
@@ -709,17 +690,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 }
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
-/*
- * Record the number of writer passes through the current rcutorture test.
- * This is also used to correlate debugfs tracing stats with the rcutorture
- * messages.
- */
-void rcutorture_record_progress(unsigned long vernum)
-{
-	rcutorture_vernum++;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_progress);
-
 /*
  * Return the root node of the specified rcu_state structure.
  */
...
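With rcutorture_record_test_transition() and rcutorture_record_progress() removed, correlation between torture output and RCU's internal state leans on the grace-period sequence number exported through rcutorture_get_gp_data(), which is kept above. A minimal sketch, assuming the RCU_FLAVOR test type; report_gp_progress() and its pr_info() line are illustrative, not part of this commit.

static void report_gp_progress(void)
{
	int flags;
	unsigned long gp_seq;

	/* Fetch the current grace-period sequence number and state flags. */
	rcutorture_get_gp_data(RCU_FLAVOR, &flags, &gp_seq);
	pr_info("rcu: gp_seq %lu, gp flags %#x\n", gp_seq, flags);
}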
-rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
+rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30
 rcutree.gp_preinit_delay=12
 rcutree.gp_init_delay=3
 rcutree.gp_cleanup_delay=3
...
@@ -39,7 +39,7 @@ rcutorture_param_onoff () {
 	if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
 	then
 		echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
-		echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+		echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30
 	fi
 }
...
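The scripted defaults above now pass onoff_interval in jiffies rather than seconds, so their real-time spacing scales with HZ. The small helper below is illustrative only (not from this commit) and just shows the arithmetic behind values such as 200 or 1000.

#include <linux/jiffies.h>

/* Roughly how many seconds an onoff_interval expressed in jiffies covers. */
static unsigned long onoff_interval_secs(unsigned long interval_jiffies)
{
	/* HZ jiffies per second: 1000 jiffies is 1 s at HZ=1000, 4 s at HZ=250. */
	return interval_jiffies / HZ;
}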