Commit 7d26e2d5 authored by Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>, committed by Dave Jones

[CPUFREQ] Eliminate the recent lockdep warnings in cpufreq

Commit b14893a6, although very much needed to properly clean up the ondemand
timer, opened up a can of worms related to locking dependencies in cpufreq.

This patch spells out what dbs_mutex is needed for and cleans up its usage in
the ondemand governor. It also resolves the lockdep warnings reported here:

http://lkml.indiana.edu/hypermail/linux/kernel/0906.1/01925.html
http://lkml.indiana.edu/hypermail/linux/kernel/0907.0/00820.html

and a few others.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
parent faf80d62
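
For context on the cycle lockdep complained about: do_dbs_timer() used to run
under the per-CPU policy rwsem, while the governor-stop path held locks across
dbs_timer_exit(), which uses cancel_delayed_work_sync() to wait for a
possibly-running timer. Waiting for the work while holding a lock the work
itself takes means the two block on each other forever. Below is a minimal
userspace analogue of that pattern and of the ordering the patch adopts (wait
first, then lock); it uses pthreads, and the names worker/gov_stop are
illustrative, not kernel APIs. Build with: gcc -pthread.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;
static int dbs_enable = 1;

/* Stands in for do_dbs_timer(): the periodic work takes dbs_mutex. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&dbs_mutex);
	if (dbs_enable)
		puts("worker: one sampling pass");
	pthread_mutex_unlock(&dbs_mutex);
	return NULL;
}

/*
 * Stands in for the CPUFREQ_GOV_STOP path. The old ordering -- taking
 * dbs_mutex before pthread_join() -- would deadlock, because the worker
 * blocks on dbs_mutex while join (like cancel_delayed_work_sync()) waits
 * for it to finish. The patch's ordering waits first, then locks.
 */
static void gov_stop(pthread_t t)
{
	pthread_join(t, NULL);            /* ~ dbs_timer_exit() */
	pthread_mutex_lock(&dbs_mutex);   /* now safe: worker is gone */
	dbs_enable = 0;                   /* ~ dbs_enable--, sysfs removal */
	pthread_mutex_unlock(&dbs_mutex);
}

int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, worker, NULL) != 0)
		return 1;
	gov_stop(t);
	return 0;
}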
drivers/cpufreq/cpufreq.c
@@ -1070,8 +1070,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	unlock_policy_rwsem_write(cpu);
-
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
@@ -1088,6 +1086,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
 
+	unlock_policy_rwsem_write(cpu);
+
 	free_cpumask_var(data->related_cpus);
 	free_cpumask_var(data->cpus);
 	kfree(data);
......
drivers/cpufreq/cpufreq_conservative.c
@@ -70,15 +70,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop. It also
+ * serializes governor limit_change with do_dbs_timer. We do not want
+ * do_dbs_timer to run when user is changing the governor or limits.
  */
 static DEFINE_MUTEX(dbs_mutex);
@@ -488,18 +483,17 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
+	mutex_lock(&dbs_mutex);
 
 	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
+		mutex_unlock(&dbs_mutex);
 		return;
 	}
 
 	dbs_check_cpu(dbs_info);
 
 	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -590,15 +584,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				&dbs_cpufreq_notifier_block,
 				CPUFREQ_TRANSITION_NOTIFIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
 
+		dbs_timer_init(this_dbs_info);
+
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
......
drivers/cpufreq/cpufreq_ondemand.c
@@ -78,15 +78,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop. It also
+ * serializes governor limit_change with do_dbs_timer. We do not want
+ * do_dbs_timer to run when user is changing the governor or limits.
  */
 static DEFINE_MUTEX(dbs_mutex);
@@ -494,11 +489,10 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
+	mutex_lock(&dbs_mutex);
 
 	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
+		mutex_unlock(&dbs_mutex);
 		return;
 	}
 
@@ -517,7 +511,7 @@ static void do_dbs_timer(struct work_struct *work)
 					dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -598,14 +592,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					max(min_sampling_rate,
 					    latency * LATENCY_MULTIPLIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
 
+		dbs_timer_init(this_dbs_info);
+
 		break;
 
	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);
......
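
With the policy rwsem out of the timer path, the remaining race is a timer
firing after the user has begun a stop: the patched do_dbs_timer() handles it
by taking dbs_mutex and rechecking dbs_info->enable before doing any work or
re-queuing itself. A minimal userspace sketch of that recheck-under-mutex
idiom follows (illustrative names, not kernel APIs); build with gcc -pthread.

#include <pthread.h>
#include <stdio.h>

struct dbs_info_like { int enable; };

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Analogue of the patched do_dbs_timer() body: everything runs under
 * dbs_mutex, and a firing that lost the race with teardown bails out
 * without re-arming itself.
 */
static void timer_body(struct dbs_info_like *info)
{
	pthread_mutex_lock(&dbs_mutex);
	if (!info->enable) {                     /* stop already ran */
		pthread_mutex_unlock(&dbs_mutex);
		return;                          /* do not re-queue */
	}
	puts("sampling load");                   /* ~ dbs_check_cpu() */
	puts("re-arming deferred work");         /* ~ queue_delayed_work_on() */
	pthread_mutex_unlock(&dbs_mutex);
}

int main(void)
{
	struct dbs_info_like info = { .enable = 1 };

	timer_body(&info);      /* normal periodic pass */
	info.enable = 0;        /* teardown clears the flag... */
	timer_body(&info);      /* ...so a late firing exits quietly */
	return 0;
}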