Commit 18320f2a authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm+acpi-3.20-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more ACPI and power management updates from Rafael Wysocki:
 "These are two reverts related to system suspend breakage by one of the
  recent commits, a fix for a recently introduced bug in devfreq and a
  bunch of other things that didn't make it into my previous pull
  request, but otherwise are ready to go.

  Specifics:

   - Revert two ACPI EC driver commits, one that broke system suspend on
     Acer Aspire S5 and one that depends on it (Rafael J Wysocki).

   - Fix a typo leading to an incorrect check in the exynos-ppmu devfreq
     driver (Dan Carpenter).

   - Add support for one more Broadwell CPU model to intel_idle (Len Brown).

   - Fix an obscure problem with state transitions related to interrupts
     in the speedstep-smi cpufreq driver (Mikulas Patocka).

   - Remove some unnecessary messages related to the "out of memory"
     condition from the core PM code (Quentin Lambert).

   - Update turbostat parameters and documentation, add support for one
     more Broadwell CPU model to it and modify it to skip printing
     disabled package C-states (Len Brown)"

* tag 'pm+acpi-3.20-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / devfreq: event: testing the wrong variable
  cpufreq: speedstep-smi: enable interrupts when waiting
  PM / OPP / clk: Remove unnecessary OOM message
  Revert "ACPI / EC: Add query flushing support"
  Revert "ACPI / EC: Add GPE reference counting debugging messages"
  tools/power turbostat: support additional Broadwell model
  intel_idle: support additional Broadwell model
  tools/power turbostat: update parameters, documentation
  tools/power turbostat: Skip printing disabled package C-states
parents db3ecdee c7fb90df
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
/* Uncomment next line to get verbose printout */ /* Uncomment next line to get verbose printout */
/* #define DEBUG */ /* #define DEBUG */
#define DEBUG_REF 0
#define pr_fmt(fmt) "ACPI : EC: " fmt #define pr_fmt(fmt) "ACPI : EC: " fmt
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -77,9 +76,7 @@ enum ec_command { ...@@ -77,9 +76,7 @@ enum ec_command {
* when trying to clear the EC */ * when trying to clear the EC */
enum { enum {
EC_FLAGS_EVENT_ENABLED, /* Event is enabled */ EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_EVENT_PENDING, /* Event is pending */
EC_FLAGS_EVENT_DETECTED, /* Event is detected */
EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
* OpReg are installed */ * OpReg are installed */
EC_FLAGS_STARTED, /* Driver is started */ EC_FLAGS_STARTED, /* Driver is started */
...@@ -91,13 +88,6 @@ enum { ...@@ -91,13 +88,6 @@ enum {
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ #define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
#define ec_debug_ref(ec, fmt, ...) \
do { \
if (DEBUG_REF) \
pr_debug("%lu: " fmt, ec->reference_count, \
## __VA_ARGS__); \
} while (0)
/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644); module_param(ec_delay, uint, 0644);
...@@ -161,12 +151,6 @@ static bool acpi_ec_flushed(struct acpi_ec *ec) ...@@ -161,12 +151,6 @@ static bool acpi_ec_flushed(struct acpi_ec *ec)
return ec->reference_count == 1; return ec->reference_count == 1;
} }
static bool acpi_ec_has_pending_event(struct acpi_ec *ec)
{
return test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
test_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
}
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
* EC Registers * EC Registers
* -------------------------------------------------------------------------- */ * -------------------------------------------------------------------------- */
...@@ -334,97 +318,34 @@ static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag) ...@@ -334,97 +318,34 @@ static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
* the flush operation is not in * the flush operation is not in
* progress * progress
* @ec: the EC device * @ec: the EC device
* @allow_event: whether event should be handled
* *
* This function must be used before taking a new action that should hold * This function must be used before taking a new action that should hold
* the reference count. If this function returns false, then the action * the reference count. If this function returns false, then the action
* must be discarded or it will prevent the flush operation from being * must be discarded or it will prevent the flush operation from being
* completed. * completed.
*
* During flushing, QR_EC command need to pass this check when there is a
* pending event, so that the reference count held for the pending event
* can be decreased by the completion of the QR_EC command.
*/ */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec, static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
bool allow_event)
{ {
if (!acpi_ec_started(ec)) { if (!acpi_ec_started(ec))
if (!allow_event || !acpi_ec_has_pending_event(ec)) return false;
return false;
}
acpi_ec_submit_request(ec); acpi_ec_submit_request(ec);
return true; return true;
} }
static void acpi_ec_submit_event(struct acpi_ec *ec) static void acpi_ec_submit_query(struct acpi_ec *ec)
{ {
if (!test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) || if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
!test_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags)) pr_debug("***** Event started *****\n");
return;
/* Hold reference for pending event */
if (!acpi_ec_submit_flushable_request(ec, true))
return;
ec_debug_ref(ec, "Increase event\n");
if (!test_and_set_bit(EC_FLAGS_EVENT_PENDING, &ec->flags)) {
pr_debug("***** Event query started *****\n");
schedule_work(&ec->work); schedule_work(&ec->work);
return;
} }
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease event\n");
} }
static void acpi_ec_complete_event(struct acpi_ec *ec) static void acpi_ec_complete_query(struct acpi_ec *ec)
{ {
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
clear_bit(EC_FLAGS_EVENT_PENDING, &ec->flags); clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
pr_debug("***** Event query stopped *****\n"); pr_debug("***** Event stopped *****\n");
/* Unhold reference for pending event */
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease event\n");
/* Check if there is another SCI_EVT detected */
acpi_ec_submit_event(ec);
}
}
static void acpi_ec_submit_detection(struct acpi_ec *ec)
{
/* Hold reference for query submission */
if (!acpi_ec_submit_flushable_request(ec, false))
return;
ec_debug_ref(ec, "Increase query\n");
if (!test_and_set_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags)) {
pr_debug("***** Event detection blocked *****\n");
acpi_ec_submit_event(ec);
return;
} }
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease query\n");
}
static void acpi_ec_complete_detection(struct acpi_ec *ec)
{
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
clear_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags);
pr_debug("***** Event detetion unblocked *****\n");
/* Unhold reference for query submission */
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease query\n");
}
}
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
set_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags);
/*
* An event may be pending even with SCI_EVT=0, so QR_EC should
* always be issued right after started.
*/
acpi_ec_submit_detection(ec);
spin_unlock_irqrestore(&ec->lock, flags);
} }
static int ec_transaction_completed(struct acpi_ec *ec) static int ec_transaction_completed(struct acpi_ec *ec)
...@@ -468,7 +389,6 @@ static void advance_transaction(struct acpi_ec *ec) ...@@ -468,7 +389,6 @@ static void advance_transaction(struct acpi_ec *ec)
t->rdata[t->ri++] = acpi_ec_read_data(ec); t->rdata[t->ri++] = acpi_ec_read_data(ec);
if (t->rlen == t->ri) { if (t->rlen == t->ri) {
t->flags |= ACPI_EC_COMMAND_COMPLETE; t->flags |= ACPI_EC_COMMAND_COMPLETE;
acpi_ec_complete_event(ec);
if (t->command == ACPI_EC_COMMAND_QUERY) if (t->command == ACPI_EC_COMMAND_QUERY)
pr_debug("***** Command(%s) hardware completion *****\n", pr_debug("***** Command(%s) hardware completion *****\n",
acpi_ec_cmd_string(t->command)); acpi_ec_cmd_string(t->command));
...@@ -479,7 +399,6 @@ static void advance_transaction(struct acpi_ec *ec) ...@@ -479,7 +399,6 @@ static void advance_transaction(struct acpi_ec *ec)
} else if (t->wlen == t->wi && } else if (t->wlen == t->wi &&
(status & ACPI_EC_FLAG_IBF) == 0) { (status & ACPI_EC_FLAG_IBF) == 0) {
t->flags |= ACPI_EC_COMMAND_COMPLETE; t->flags |= ACPI_EC_COMMAND_COMPLETE;
acpi_ec_complete_event(ec);
wakeup = true; wakeup = true;
} }
goto out; goto out;
...@@ -488,17 +407,16 @@ static void advance_transaction(struct acpi_ec *ec) ...@@ -488,17 +407,16 @@ static void advance_transaction(struct acpi_ec *ec)
!(status & ACPI_EC_FLAG_SCI) && !(status & ACPI_EC_FLAG_SCI) &&
(t->command == ACPI_EC_COMMAND_QUERY)) { (t->command == ACPI_EC_COMMAND_QUERY)) {
t->flags |= ACPI_EC_COMMAND_POLL; t->flags |= ACPI_EC_COMMAND_POLL;
acpi_ec_complete_detection(ec); acpi_ec_complete_query(ec);
t->rdata[t->ri++] = 0x00; t->rdata[t->ri++] = 0x00;
t->flags |= ACPI_EC_COMMAND_COMPLETE; t->flags |= ACPI_EC_COMMAND_COMPLETE;
acpi_ec_complete_event(ec);
pr_debug("***** Command(%s) software completion *****\n", pr_debug("***** Command(%s) software completion *****\n",
acpi_ec_cmd_string(t->command)); acpi_ec_cmd_string(t->command));
wakeup = true; wakeup = true;
} else if ((status & ACPI_EC_FLAG_IBF) == 0) { } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
acpi_ec_write_cmd(ec, t->command); acpi_ec_write_cmd(ec, t->command);
t->flags |= ACPI_EC_COMMAND_POLL; t->flags |= ACPI_EC_COMMAND_POLL;
acpi_ec_complete_detection(ec); acpi_ec_complete_query(ec);
} else } else
goto err; goto err;
goto out; goto out;
...@@ -519,7 +437,7 @@ static void advance_transaction(struct acpi_ec *ec) ...@@ -519,7 +437,7 @@ static void advance_transaction(struct acpi_ec *ec)
} }
out: out:
if (status & ACPI_EC_FLAG_SCI) if (status & ACPI_EC_FLAG_SCI)
acpi_ec_submit_detection(ec); acpi_ec_submit_query(ec);
if (wakeup && in_interrupt()) if (wakeup && in_interrupt())
wake_up(&ec->wait); wake_up(&ec->wait);
} }
...@@ -580,11 +498,10 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, ...@@ -580,11 +498,10 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
/* start transaction */ /* start transaction */
spin_lock_irqsave(&ec->lock, tmp); spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */ /* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec, true)) { if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL; ret = -EINVAL;
goto unlock; goto unlock;
} }
ec_debug_ref(ec, "Increase command\n");
/* following two actions should be kept atomic */ /* following two actions should be kept atomic */
ec->curr = t; ec->curr = t;
pr_debug("***** Command(%s) started *****\n", pr_debug("***** Command(%s) started *****\n",
...@@ -600,7 +517,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, ...@@ -600,7 +517,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = NULL; ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */ /* Disable GPE for command processing (IBF=0/OBF=1) */
acpi_ec_complete_request(ec); acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease command\n");
unlock: unlock:
spin_unlock_irqrestore(&ec->lock, tmp); spin_unlock_irqrestore(&ec->lock, tmp);
return ret; return ret;
...@@ -762,10 +678,8 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming) ...@@ -762,10 +678,8 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) { if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
pr_debug("+++++ Starting EC +++++\n"); pr_debug("+++++ Starting EC +++++\n");
/* Enable GPE for event processing (SCI_EVT=1) */ /* Enable GPE for event processing (SCI_EVT=1) */
if (!resuming) { if (!resuming)
acpi_ec_submit_request(ec); acpi_ec_submit_request(ec);
ec_debug_ref(ec, "Increase driver\n");
}
pr_info("+++++ EC started +++++\n"); pr_info("+++++ EC started +++++\n");
} }
spin_unlock_irqrestore(&ec->lock, flags); spin_unlock_irqrestore(&ec->lock, flags);
...@@ -794,10 +708,8 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) ...@@ -794,10 +708,8 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
wait_event(ec->wait, acpi_ec_stopped(ec)); wait_event(ec->wait, acpi_ec_stopped(ec));
spin_lock_irqsave(&ec->lock, flags); spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */ /* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) { if (!suspending)
acpi_ec_complete_request(ec); acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease driver\n");
}
clear_bit(EC_FLAGS_STARTED, &ec->flags); clear_bit(EC_FLAGS_STARTED, &ec->flags);
clear_bit(EC_FLAGS_STOPPED, &ec->flags); clear_bit(EC_FLAGS_STOPPED, &ec->flags);
pr_info("+++++ EC stopped +++++\n"); pr_info("+++++ EC stopped +++++\n");
...@@ -967,9 +879,7 @@ static void acpi_ec_gpe_poller(struct work_struct *work) ...@@ -967,9 +879,7 @@ static void acpi_ec_gpe_poller(struct work_struct *work)
{ {
struct acpi_ec *ec = container_of(work, struct acpi_ec, work); struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
pr_debug("***** Event poller started *****\n");
acpi_ec_query(ec, NULL); acpi_ec_query(ec, NULL);
pr_debug("***** Event poller stopped *****\n");
} }
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
...@@ -1039,6 +949,7 @@ static struct acpi_ec *make_acpi_ec(void) ...@@ -1039,6 +949,7 @@ static struct acpi_ec *make_acpi_ec(void)
if (!ec) if (!ec)
return NULL; return NULL;
ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
mutex_init(&ec->mutex); mutex_init(&ec->mutex);
init_waitqueue_head(&ec->wait); init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list); INIT_LIST_HEAD(&ec->list);
...@@ -1189,7 +1100,7 @@ static int acpi_ec_add(struct acpi_device *device) ...@@ -1189,7 +1100,7 @@ static int acpi_ec_add(struct acpi_device *device)
ret = ec_install_handlers(ec); ret = ec_install_handlers(ec);
/* EC is fully operational, allow queries */ /* EC is fully operational, allow queries */
acpi_ec_enable_event(ec); clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
/* Clear stale _Q events if hardware might require that */ /* Clear stale _Q events if hardware might require that */
if (EC_FLAGS_CLEAR_ON_RESUME) if (EC_FLAGS_CLEAR_ON_RESUME)
......
...@@ -855,7 +855,6 @@ fw_create_instance(struct firmware *firmware, const char *fw_name, ...@@ -855,7 +855,6 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL); fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
if (!fw_priv) { if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
fw_priv = ERR_PTR(-ENOMEM); fw_priv = ERR_PTR(-ENOMEM);
goto exit; goto exit;
} }
......
...@@ -81,10 +81,8 @@ static int __pm_clk_add(struct device *dev, const char *con_id, ...@@ -81,10 +81,8 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
return -EINVAL; return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL); ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce) { if (!ce)
dev_err(dev, "Not enough memory for clock entry.\n");
return -ENOMEM; return -ENOMEM;
}
if (con_id) { if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL); ce->con_id = kstrdup(con_id, GFP_KERNEL);
......
...@@ -474,10 +474,8 @@ static int _opp_add_dynamic(struct device *dev, unsigned long freq, ...@@ -474,10 +474,8 @@ static int _opp_add_dynamic(struct device *dev, unsigned long freq,
/* allocate new OPP node */ /* allocate new OPP node */
new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) { if (!new_opp)
dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
return -ENOMEM; return -ENOMEM;
}
/* Hold our list modification lock here */ /* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock); mutex_lock(&dev_opp_list_lock);
...@@ -695,10 +693,8 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, ...@@ -695,10 +693,8 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
/* keep the node allocated */ /* keep the node allocated */
new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) { if (!new_opp)
dev_warn(dev, "%s: Unable to create OPP\n", __func__);
return -ENOMEM; return -ENOMEM;
}
mutex_lock(&dev_opp_list_lock); mutex_lock(&dev_opp_list_lock);
......
...@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, ...@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
pr_debug("previous speed is %u\n", prev_speed); pr_debug("previous speed is %u\n", prev_speed);
preempt_disable();
local_irq_save(flags); local_irq_save(flags);
/* switch to low state */ /* switch to low state */
...@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, ...@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
out: out:
local_irq_restore(flags); local_irq_restore(flags);
preempt_enable();
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(speedstep_get_freqs); EXPORT_SYMBOL_GPL(speedstep_get_freqs);
......
...@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state) ...@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
return; return;
/* Disable IRQs */ /* Disable IRQs */
preempt_disable();
local_irq_save(flags); local_irq_save(flags);
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
...@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state) ...@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
do { do {
if (retry) { if (retry) {
/*
* We need to enable interrupts, otherwise the blockage
* won't resolve.
*
* We disable preemption so that other processes don't
* run. If other processes were running, they could
* submit more DMA requests, making the blockage worse.
*/
pr_debug("retry %u, previous result %u, waiting...\n", pr_debug("retry %u, previous result %u, waiting...\n",
retry, result); retry, result);
local_irq_enable();
mdelay(retry * 50); mdelay(retry * 50);
local_irq_disable();
} }
retry++; retry++;
__asm__ __volatile__( __asm__ __volatile__(
...@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state) ...@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
/* enable IRQs */ /* enable IRQs */
local_irq_restore(flags); local_irq_restore(flags);
preempt_enable();
if (new_state == state) if (new_state == state)
pr_debug("change to %u MHz succeeded after %u tries " pr_debug("change to %u MHz succeeded after %u tries "
......
...@@ -327,8 +327,8 @@ static int exynos_ppmu_probe(struct platform_device *pdev) ...@@ -327,8 +327,8 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
for (i = 0; i < info->num_events; i++) { for (i = 0; i < info->num_events; i++) {
edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]); edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
if (IS_ERR(edev)) { if (IS_ERR(edev[i])) {
ret = PTR_ERR(edev); ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev, dev_err(&pdev->dev,
"failed to add devfreq-event device\n"); "failed to add devfreq-event device\n");
goto err; goto err;
......
...@@ -727,6 +727,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { ...@@ -727,6 +727,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x46, idle_cpu_hsw), ICPU(0x46, idle_cpu_hsw),
ICPU(0x4d, idle_cpu_avn), ICPU(0x4d, idle_cpu_avn),
ICPU(0x3d, idle_cpu_bdw), ICPU(0x3d, idle_cpu_bdw),
ICPU(0x47, idle_cpu_bdw),
ICPU(0x4f, idle_cpu_bdw), ICPU(0x4f, idle_cpu_bdw),
ICPU(0x56, idle_cpu_bdw), ICPU(0x56, idle_cpu_bdw),
{} {}
......
...@@ -9,7 +9,7 @@ turbostat \- Report processor frequency and idle statistics ...@@ -9,7 +9,7 @@ turbostat \- Report processor frequency and idle statistics
.br .br
.B turbostat .B turbostat
.RB [ Options ] .RB [ Options ]
.RB [ "\-i interval_sec" ] .RB [ "\--interval seconds" ]
.SH DESCRIPTION .SH DESCRIPTION
\fBturbostat \fP reports processor topology, frequency, \fBturbostat \fP reports processor topology, frequency,
idle power-state statistics, temperature and power on X86 processors. idle power-state statistics, temperature and power on X86 processors.
...@@ -18,31 +18,41 @@ The first method is to supply a ...@@ -18,31 +18,41 @@ The first method is to supply a
\fBcommand\fP, which is forked and statistics are printed \fBcommand\fP, which is forked and statistics are printed
upon its completion. upon its completion.
The second method is to omit the command, The second method is to omit the command,
and turbodstat will print statistics every 5 seconds. and turbostat displays statistics every 5 seconds.
The 5-second interval can changed using the -i option. The 5-second interval can be changed using the --interval option.
Some information is not availalbe on older processors. Some information is not available on older processors.
.SS Options .SS Options
The \fB-p\fP option limits output to the 1st thread in 1st core of each package. \fB--Counter MSR#\fP shows the delta of the specified 64-bit MSR counter.
.PP .PP
The \fB-P\fP option limits output to the 1st thread in each Package. \fB--counter MSR#\fP shows the delta of the specified 32-bit MSR counter.
.PP .PP
The \fB-S\fP option limits output to a 1-line System Summary for each interval. \fB--Dump\fP displays the raw counter values.
.PP .PP
The \fB-v\fP option increases verbosity. \fB--debug\fP displays additional system configuration information. Invoking this parameter
more than once may also enable internal turbostat debug information.
.PP .PP
The \fB-c MSR#\fP option includes the delta of the specified 32-bit MSR counter. \fB--interval seconds\fP overrides the default 5-second measurement interval.
.PP .PP
The \fB-C MSR#\fP option includes the delta of the specified 64-bit MSR counter. \fB--help\fP displays usage for the most common parameters.
.PP .PP
The \fB-m MSR#\fP option includes the the specified 32-bit MSR value. \fB--Joules\fP displays energy in Joules, rather than dividing Joules by time to print power in Watts.
.PP .PP
The \fB-M MSR#\fP option includes the the specified 64-bit MSR value. \fB--MSR MSR#\fP shows the specified 64-bit MSR value.
.PP .PP
The \fB-i interval_sec\fP option prints statistics every \fiinterval_sec\fP seconds. \fB--msr MSR#\fP shows the specified 32-bit MSR value.
The default is 5 seconds.
.PP .PP
The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit, \fB--Package\fP limits output to the system summary plus the 1st thread in each Package.
.PP
\fB--processor\fP limits output to the system summary plus the 1st thread in each processor of each package. Ie. it skips hyper-threaded siblings.
.PP
\fB--Summary\fP limits output to a 1-line System Summary for each interval.
.PP
\fB--TCC temperature\fP sets the Thermal Control Circuit temperature for systems which do not export that value. This is used for making sense of the Digital Thermal Sensor outputs, as they return degrees Celsius below the TCC activation temperature.
.PP
\fB--version\fP displays the version.
.PP
The \fBcommand\fP parameter forks \fBcommand\fP, and upon its exit,
displays the statistics gathered since it was forked. displays the statistics gathered since it was forked.
.PP .PP
.SH FIELD DESCRIPTIONS .SH FIELD DESCRIPTIONS
...@@ -52,7 +62,7 @@ displays the statistics gathered since it was forked. ...@@ -52,7 +62,7 @@ displays the statistics gathered since it was forked.
\fBCPU\fP Linux CPU (logical processor) number. \fBCPU\fP Linux CPU (logical processor) number.
Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology. Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
\fBAVG_MHz\fP number of cycles executed divided by time elapsed. \fBAVG_MHz\fP number of cycles executed divided by time elapsed.
\fB%Buzy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state. \fB%Busy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state.
\fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state). \fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state).
\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval. \fBTSC_MHz\fP average MHz that the TSC ran during the entire interval.
\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.
...@@ -68,7 +78,7 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T ...@@ -68,7 +78,7 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T
.fi .fi
.PP .PP
.SH EXAMPLE .SH EXAMPLE
Without any parameters, turbostat prints out counters ever 5 seconds. Without any parameters, turbostat displays statistics ever 5 seconds.
(override interval with "-i sec" option, or specify a command (override interval with "-i sec" option, or specify a command
for turbostat to fork). for turbostat to fork).
...@@ -91,19 +101,19 @@ Subsequent rows show per-CPU statistics. ...@@ -91,19 +101,19 @@ Subsequent rows show per-CPU statistics.
3 3 3 0.20 1596 3492 0 0.44 0.00 99.37 0.00 23 3 3 3 0.20 1596 3492 0 0.44 0.00 99.37 0.00 23
3 7 5 0.31 1596 3492 0 0.33 3 7 5 0.31 1596 3492 0 0.33
.fi .fi
.SH VERBOSE EXAMPLE .SH DEBUG EXAMPLE
The "-v" option adds verbosity to the output: The "--debug" option prints additional system information before measurements:
.nf .nf
[root@ivy]# turbostat -v turbostat version 4.0 10-Feb, 2015 - Len Brown <lenb@kernel.org>
turbostat v3.0 November 23, 2012 - Len Brown <lenb@kernel.org>
CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3a:9 (6:58:9) CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3a:9 (6:58:9)
CPUID(6): APERF, DTS, PTM, EPB CPUID(6): APERF, DTS, PTM, EPB
RAPL: 851 sec. Joule Counter Range RAPL: 851 sec. Joule Counter Range, at 77 Watts
cpu0: MSR_NHM_PLATFORM_INFO: 0x81010f0012300 cpu0: MSR_NHM_PLATFORM_INFO: 0x81010f0012300
16 * 100 = 1600 MHz max efficiency 16 * 100 = 1600 MHz max efficiency
35 * 100 = 3500 MHz TSC frequency 35 * 100 = 3500 MHz TSC frequency
cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6-noret) cpu0: MSR_IA32_POWER_CTL: 0x0014005d (C1E auto-promotion: DISabled)
cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6n)
cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727 cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
37 * 100 = 3700 MHz max turbo 4 active cores 37 * 100 = 3700 MHz max turbo 4 active cores
38 * 100 = 3800 MHz max turbo 3 active cores 38 * 100 = 3800 MHz max turbo 3 active cores
...@@ -112,9 +122,9 @@ cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727 ...@@ -112,9 +122,9 @@ cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced) cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced)
cpu0: MSR_RAPL_POWER_UNIT: 0x000a1003 (0.125000 Watts, 0.000015 Joules, 0.000977 sec.) cpu0: MSR_RAPL_POWER_UNIT: 0x000a1003 (0.125000 Watts, 0.000015 Joules, 0.000977 sec.)
cpu0: MSR_PKG_POWER_INFO: 0x01e00268 (77 W TDP, RAPL 60 - 0 W, 0.000000 sec.) cpu0: MSR_PKG_POWER_INFO: 0x01e00268 (77 W TDP, RAPL 60 - 0 W, 0.000000 sec.)
cpu0: MSR_PKG_POWER_LIMIT: 0x830000148268 (UNlocked) cpu0: MSR_PKG_POWER_LIMIT: 0x30000148268 (UNlocked)
cpu0: PKG Limit #1: ENabled (77.000000 Watts, 1.000000 sec, clamp DISabled) cpu0: PKG Limit #1: ENabled (77.000000 Watts, 1.000000 sec, clamp DISabled)
cpu0: PKG Limit #2: ENabled (96.000000 Watts, 0.000977* sec, clamp DISabled) cpu0: PKG Limit #2: DISabled (96.000000 Watts, 0.000977* sec, clamp DISabled)
cpu0: MSR_PP0_POLICY: 0 cpu0: MSR_PP0_POLICY: 0
cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked) cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled) cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
...@@ -123,9 +133,9 @@ cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked) ...@@ -123,9 +133,9 @@ cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled) cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00691400 (105 C) cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00691400 (105 C)
cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884e0000 (27 C) cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884e0000 (27 C)
cpu0: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1) cpu0: MSR_IA32_THERM_STATUS: 0x88580000 (17 C +/- 1)
cpu1: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1) cpu1: MSR_IA32_THERM_STATUS: 0x885a0000 (15 C +/- 1)
cpu2: MSR_IA32_THERM_STATUS: 0x88540000 (21 C +/- 1) cpu2: MSR_IA32_THERM_STATUS: 0x88570000 (18 C +/- 1)
cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1) cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1)
... ...
.fi .fi
...@@ -195,7 +205,7 @@ in those kernels. ...@@ -195,7 +205,7 @@ in those kernels.
AVG_MHz = APERF_delta/measurement_interval. This is the actual AVG_MHz = APERF_delta/measurement_interval. This is the actual
number of elapsed cycles divided by the entire sample interval -- number of elapsed cycles divided by the entire sample interval --
including idle time. Note that this calculation is resiliant including idle time. Note that this calculation is resilient
to systems lacking a non-stop TSC. to systems lacking a non-stop TSC.
TSC_MHz = TSC_delta/measurement_interval. TSC_MHz = TSC_delta/measurement_interval.
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment