Commit e0a1e44c authored by Rafael J. Wysocki's avatar Rafael J. Wysocki

Merge branch 'pm-cpuidle'

* pm-cpuidle:
  intel_idle: correct BXT support
  intel_idle: re-work bxt_idle_state_table_update() and its helper
  idle_intel: Add Denverton
  drivers/idle: make intel_idle.c driver more explicitly non-modular
parents 9def970e bef45096
...@@ -46,8 +46,6 @@ ...@@ -46,8 +46,6 @@
* to avoid complications with the lapic timer workaround. * to avoid complications with the lapic timer workaround.
* Have not seen issues with suspend, but may need same workaround here. * Have not seen issues with suspend, but may need same workaround here.
* *
* There is currently no kernel-based automatic probing/loading mechanism
* if the driver is built as a module.
*/ */
/* un-comment DEBUG to enable pr_debug() statements */ /* un-comment DEBUG to enable pr_debug() statements */
...@@ -60,7 +58,7 @@ ...@@ -60,7 +58,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/module.h> #include <linux/moduleparam.h>
#include <asm/cpu_device_id.h> #include <asm/cpu_device_id.h>
#include <asm/intel-family.h> #include <asm/intel-family.h>
#include <asm/mwait.h> #include <asm/mwait.h>
...@@ -828,6 +826,35 @@ static struct cpuidle_state bxt_cstates[] = { ...@@ -828,6 +826,35 @@ static struct cpuidle_state bxt_cstates[] = {
.enter = NULL } .enter = NULL }
}; };
/*
 * cpuidle state table for the Denverton (DNV) Atom SoC.
 *
 * NOTE(review): entry order defines the cpuidle state indices used by the
 * framework (this driver indexes sibling tables positionally, e.g.
 * bxt_cstates[2..6] in bxt_idle_state_table_update()) — do not reorder.
 * exit_latency / target_residency are in microseconds, matching the other
 * tables in this file; the specific values presumably come from Intel
 * platform characterization — confirm against platform documentation.
 */
static struct cpuidle_state dnv_cstates[] = {
{
/* C1: shallowest state, MWAIT hint 0x00 */
.name = "C1-DNV",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
/* C1E: enhanced C1 (MWAIT hint 0x01), higher latency than C1 */
.name = "C1E-DNV",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01),
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
/*
 * C6 (MWAIT hint 0x20): TLB contents may be lost in this state,
 * hence CPUIDLE_FLAG_TLB_FLUSHED.
 */
.name = "C6-DNV",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 50,
.target_residency = 500,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
/* .enter == NULL terminates the table */
.enter = NULL }
};
/** /**
* intel_idle * intel_idle
* @dev: cpuidle_device * @dev: cpuidle_device
...@@ -1017,6 +1044,11 @@ static const struct idle_cpu idle_cpu_bxt = { ...@@ -1017,6 +1044,11 @@ static const struct idle_cpu idle_cpu_bxt = {
.disable_promotion_to_c1e = true, .disable_promotion_to_c1e = true,
}; };
/*
 * Per-CPU-model descriptor for Denverton: selects dnv_cstates as the
 * state table and requests that promotion to C1E be disabled.
 * Matched via the ICPU(INTEL_FAM6_ATOM_DENVERTON, ...) table entry.
 */
static const struct idle_cpu idle_cpu_dnv = {
.state_table = dnv_cstates,
.disable_promotion_to_c1e = true,
};
#define ICPU(model, cpu) \ #define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
...@@ -1053,9 +1085,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { ...@@ -1053,9 +1085,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx), ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl), ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt), ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{} {}
}; };
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
/* /*
* intel_idle_probe() * intel_idle_probe()
...@@ -1155,7 +1187,10 @@ static unsigned long long irtl_2_usec(unsigned long long irtl) ...@@ -1155,7 +1187,10 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
{ {
unsigned long long ns; unsigned long long ns;
ns = irtl_ns_units[(irtl >> 10) & 0x3]; if (!irtl)
return 0;
ns = irtl_ns_units[(irtl >> 10) & 0x7];
return div64_u64((irtl & 0x3FF) * ns, 1000); return div64_u64((irtl & 0x3FF) * ns, 1000);
} }
...@@ -1168,43 +1203,39 @@ static unsigned long long irtl_2_usec(unsigned long long irtl) ...@@ -1168,43 +1203,39 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
static void bxt_idle_state_table_update(void) static void bxt_idle_state_table_update(void)
{ {
unsigned long long msr; unsigned long long msr;
unsigned int usec;
rdmsrl(MSR_PKGC6_IRTL, msr); rdmsrl(MSR_PKGC6_IRTL, msr);
if (msr) { usec = irtl_2_usec(msr);
unsigned int usec = irtl_2_usec(msr); if (usec) {
bxt_cstates[2].exit_latency = usec; bxt_cstates[2].exit_latency = usec;
bxt_cstates[2].target_residency = usec; bxt_cstates[2].target_residency = usec;
} }
rdmsrl(MSR_PKGC7_IRTL, msr); rdmsrl(MSR_PKGC7_IRTL, msr);
if (msr) { usec = irtl_2_usec(msr);
unsigned int usec = irtl_2_usec(msr); if (usec) {
bxt_cstates[3].exit_latency = usec; bxt_cstates[3].exit_latency = usec;
bxt_cstates[3].target_residency = usec; bxt_cstates[3].target_residency = usec;
} }
rdmsrl(MSR_PKGC8_IRTL, msr); rdmsrl(MSR_PKGC8_IRTL, msr);
if (msr) { usec = irtl_2_usec(msr);
unsigned int usec = irtl_2_usec(msr); if (usec) {
bxt_cstates[4].exit_latency = usec; bxt_cstates[4].exit_latency = usec;
bxt_cstates[4].target_residency = usec; bxt_cstates[4].target_residency = usec;
} }
rdmsrl(MSR_PKGC9_IRTL, msr); rdmsrl(MSR_PKGC9_IRTL, msr);
if (msr) { usec = irtl_2_usec(msr);
unsigned int usec = irtl_2_usec(msr); if (usec) {
bxt_cstates[5].exit_latency = usec; bxt_cstates[5].exit_latency = usec;
bxt_cstates[5].target_residency = usec; bxt_cstates[5].target_residency = usec;
} }
rdmsrl(MSR_PKGC10_IRTL, msr); rdmsrl(MSR_PKGC10_IRTL, msr);
if (msr) { usec = irtl_2_usec(msr);
unsigned int usec = irtl_2_usec(msr); if (usec) {
bxt_cstates[6].exit_latency = usec; bxt_cstates[6].exit_latency = usec;
bxt_cstates[6].target_residency = usec; bxt_cstates[6].target_residency = usec;
} }
...@@ -1416,34 +1447,12 @@ static int __init intel_idle_init(void) ...@@ -1416,34 +1447,12 @@ static int __init intel_idle_init(void)
return 0; return 0;
} }
device_initcall(intel_idle_init);
static void __exit intel_idle_exit(void) /*
{ * We are not really modular, but we used to support that. Meaning we also
struct cpuidle_device *dev; * support "intel_idle.max_cstate=..." at boot and also a read-only export of
int i; * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
* is the easiest way (currently) to continue doing that.
cpu_notifier_register_begin(); */
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
__unregister_cpu_notifier(&cpu_hotplug_notifier);
for_each_possible_cpu(i) {
dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
cpuidle_unregister_device(dev);
}
cpu_notifier_register_done();
cpuidle_unregister_driver(&intel_idle_driver);
free_percpu(intel_idle_cpuidle_devices);
}
module_init(intel_idle_init);
module_exit(intel_idle_exit);
module_param(max_cstate, int, 0444); module_param(max_cstate, int, 0444);
MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment