Commit 97b80e68 authored by Rafael J. Wysocki

Merge branch 'pm-cpuidle'

* pm-cpuidle:
  PM / suspend: Always use deepest C-state in the "freeze" sleep state
  cpuidle / menu: move repeated correction factor check to init
  cpuidle / menu: Return (-1) if there are no suitable states
  cpuidle: Combine cpuidle_enabled() with cpuidle_select()
  ARM: clps711x: Add cpuidle driver
parents 91ab377b 9051785f
@@ -13,6 +13,12 @@ config ARM_BIG_LITTLE_CPUIDLE
	  define different C-states for little and big cores through the
	  multiple CPU idle drivers infrastructure.

config ARM_CLPS711X_CPUIDLE
	bool "CPU Idle Driver for CLPS711X processors"
	depends on ARCH_CLPS711X || COMPILE_TEST
	help
	  Select this to enable cpuidle on Cirrus Logic CLPS711X SOCs.

config ARM_HIGHBANK_CPUIDLE
	bool "CPU Idle Driver for Calxeda processors"
	depends on ARM_PSCI
......
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
##################################################################################
# ARM SoC drivers
obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o
obj-$(CONFIG_ARM_CLPS711X_CPUIDLE) += cpuidle-clps711x.o
obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
......
/*
 * CLPS711X CPU idle driver
 *
 * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cpuidle.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#define CLPS711X_CPUIDLE_NAME	"clps711x-cpuidle"

static void __iomem *clps711x_halt;

static int clps711x_cpuidle_halt(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	writel(0xaa, clps711x_halt);

	return index;
}

static struct cpuidle_driver clps711x_idle_driver = {
	.name		= CLPS711X_CPUIDLE_NAME,
	.owner		= THIS_MODULE,
	.states[0]	= {
		.name		= "HALT",
		.desc		= "CLPS711X HALT",
		.enter		= clps711x_cpuidle_halt,
		.exit_latency	= 1,
	},
	.state_count	= 1,
};

static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	clps711x_halt = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(clps711x_halt))
		return PTR_ERR(clps711x_halt);

	return cpuidle_register(&clps711x_idle_driver, NULL);
}

static struct platform_driver clps711x_cpuidle_driver = {
	.driver	= {
		.name	= CLPS711X_CPUIDLE_NAME,
		.owner	= THIS_MODULE,
	},
};
module_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe);
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X CPU idle driver");
MODULE_LICENSE("GPL");
@@ -32,6 +32,7 @@ LIST_HEAD(cpuidle_detected_devices);
static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;
static bool use_deepest_state __read_mostly;
int cpuidle_disabled(void)
{
@@ -65,23 +66,42 @@ int cpuidle_play_dead(void)
}
/**
* cpuidle_enabled - check if the cpuidle framework is ready
* @dev: cpuidle device for this cpu
* @drv: cpuidle driver for this cpu
* cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode.
* @enable: Whether to enable or disable the feature.
*
* If the "deepest idle" mode is enabled, cpuidle will ignore the governor and
* always use the state with the greatest exit latency (out of the states that
* are not disabled).
*
* Return 0 on success, otherwise:
* -NODEV : the cpuidle framework is not available
* -EBUSY : the cpuidle framework is not initialized
* This function can only be called after cpuidle_pause() to avoid races.
*/
int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev)
void cpuidle_use_deepest_state(bool enable)
{
if (off || !initialized)
return -ENODEV;
use_deepest_state = enable;
}
if (!drv || !dev || !dev->enabled)
return -EBUSY;
/**
* cpuidle_find_deepest_state - Find the state of the greatest exit latency.
* @drv: cpuidle driver for a given CPU.
* @dev: cpuidle device for a given CPU.
*/
static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
unsigned int latency_req = 0;
int i, ret = CPUIDLE_DRIVER_STATE_START - 1;
return 0;
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable || s->exit_latency <= latency_req)
continue;
latency_req = s->exit_latency;
ret = i;
}
return ret;
}
/**
@@ -138,6 +158,15 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
*/
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
if (off || !initialized)
return -ENODEV;
if (!drv || !dev || !dev->enabled)
return -EBUSY;
if (unlikely(use_deepest_state))
return cpuidle_find_deepest_state(drv, dev);
return cpuidle_curr_governor->select(drv, dev);
}
@@ -169,7 +198,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
if (cpuidle_curr_governor->reflect)
if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state))
cpuidle_curr_governor->reflect(dev, index);
}
......
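As an aside, the selection rule in cpuidle_find_deepest_state() is simple enough to demonstrate outside the kernel. The stand-alone sketch below (plain user-space C, with invented state names and latencies) applies the same scan: keep the highest exit latency among states that are not disabled, and fall back to an invalid index when nothing qualifies.

#include <stdbool.h>
#include <stdio.h>

struct state {
	const char *name;
	unsigned int exit_latency;	/* worst-case wakeup cost, in us */
	bool disabled;
};

/* Same scan as cpuidle_find_deepest_state(): deepest usable state wins. */
static int find_deepest(const struct state *s, int count)
{
	unsigned int latency_req = 0;
	int i, ret = -1;

	for (i = 0; i < count; i++) {
		if (s[i].disabled || s[i].exit_latency <= latency_req)
			continue;
		latency_req = s[i].exit_latency;
		ret = i;
	}
	return ret;
}

int main(void)
{
	const struct state states[] = {
		{ "WFI",   1, false },
		{ "C2",   50, false },
		{ "C3",  200, true  },	/* disabled, so C2 is chosen */
	};
	int idx = find_deepest(states, 3);

	printf("deepest usable state: %d (%s)\n",
	       idx, idx >= 0 ? states[idx].name : "none");
	return 0;
}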
@@ -296,7 +296,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->needs_update = 0;
}
data->last_state_idx = 0;
data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0))
@@ -310,13 +310,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->bucket = which_bucket(data->next_timer_us);
/*
* if the correction factor is 0 (eg first time init or cpu hotplug
* etc), we actually want to start out with a unity factor.
*/
if (data->correction_factor[data->bucket] == 0)
data->correction_factor[data->bucket] = RESOLUTION * DECAY;
/*
* Force the result of multiplication to be 64 bits even if both
* operands are 32 bits.
@@ -466,9 +459,17 @@ static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
int i;
memset(data, 0, sizeof(struct menu_device));
/*
* if the correction factor is 0 (eg first time init or cpu hotplug
* etc), we actually want to start out with a unity factor.
*/
for(i = 0; i < BUCKETS; i++)
data->correction_factor[i] = RESOLUTION * DECAY;
return 0;
}
......
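For context on the "unity factor" mentioned in the comment above: the menu governor predicts the idle duration by scaling the time until the next timer with correction_factor / (RESOLUTION * DECAY), so seeding every bucket with RESOLUTION * DECAY at enable time means the timer is trusted as-is until measured wakeups shift the factor. A minimal stand-alone sketch of that arithmetic (user-space C; the kernel's RESOLUTION and DECAY values are assumed here, and the kernel's rounding helper is omitted):

#include <stdint.h>
#include <stdio.h>

#define RESOLUTION 1024
#define DECAY 8

/* Simplified menu-governor prediction: scale the next-timer distance by a
 * learned per-bucket factor, then normalize back down. */
static uint64_t predict_us(uint64_t next_timer_us, uint64_t correction_factor)
{
	return next_timer_us * correction_factor / (RESOLUTION * DECAY);
}

int main(void)
{
	uint64_t unity = RESOLUTION * DECAY;

	/* Freshly enabled CPU: unity factor, so a 5000 us timer is trusted. */
	printf("%llu\n", (unsigned long long)predict_us(5000, unity));
	/* A factor below unity encodes "this CPU usually wakes up early". */
	printf("%llu\n", (unsigned long long)predict_us(5000, unity / 2));
	return 0;
}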
@@ -120,8 +120,6 @@ struct cpuidle_driver {
#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
extern int cpuidle_enabled(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter(struct cpuidle_driver *drv,
@@ -145,13 +143,11 @@ extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
extern void cpuidle_use_deepest_state(bool enable);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
#else
static inline void disable_cpuidle(void) { }
static inline int cpuidle_enabled(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
@@ -180,6 +176,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable) {}
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
#endif
......
@@ -54,9 +54,11 @@ static void freeze_begin(void)
static void freeze_enter(void)
{
cpuidle_use_deepest_state(true);
cpuidle_resume();
wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
cpuidle_pause();
cpuidle_use_deepest_state(false);
}
void freeze_wake(void)
......
@@ -101,19 +101,13 @@ static int cpuidle_idle_call(void)
rcu_idle_enter();
/*
* Check if the cpuidle framework is ready, otherwise fallback
* to the default arch specific idle method
* Ask the cpuidle framework to choose a convenient idle state.
* Fall back to the default arch specific idle method on errors.
*/
ret = cpuidle_enabled(drv, dev);
if (!ret) {
/*
* Ask the governor to choose an idle state it thinks
* it is convenient to go to. There is *always* a
* convenient idle state
*/
next_state = cpuidle_select(drv, dev);
next_state = cpuidle_select(drv, dev);
ret = next_state;
if (ret >= 0) {
/*
* The idle task must be scheduled, it is pointless to
* go to idle, just update no idle residency and get
@@ -140,7 +134,7 @@ static int cpuidle_idle_call(void)
CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
&dev->cpu);
if (!ret) {
if (ret >= 0) {
trace_cpu_idle_rcuidle(next_state, dev->cpu);
/*
@@ -175,7 +169,7 @@ static int cpuidle_idle_call(void)
* We can't use the cpuidle framework, let's use the default
* idle routine
*/
if (ret)
if (ret < 0)
arch_cpu_idle();
__current_set_polling();
......
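Taken together, the idle-loop change above collapses the old two-step "check cpuidle_enabled(), then ask the governor" sequence into a single cpuidle_select() call whose sign carries both answers. A stand-alone mock of that caller pattern (user-space C; every function here is a stand-in, not the kernel symbol):

#include <errno.h>
#include <stdio.h>

static int cpuidle_off = 1;	/* pretend the framework is unavailable */

/* Stand-in for cpuidle_select(): negative errno when the framework or the
 * device is unusable, otherwise the index of the chosen idle state. */
static int mock_cpuidle_select(void)
{
	if (cpuidle_off)
		return -ENODEV;
	return 0;
}

static void mock_arch_cpu_idle(void)
{
	printf("falling back to the default arch idle routine\n");
}

int main(void)
{
	int next_state = mock_cpuidle_select();

	if (next_state < 0)
		mock_arch_cpu_idle();	/* any error means: don't use cpuidle */
	else
		printf("entering cpuidle state %d\n", next_state);
	return 0;
}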