Commit a1b5fd8f authored by Rafael J. Wysocki

Merge branch 'pm-cpuidle'

* pm-cpuidle:
  cpuidle: Make drivers initialize polling state
  cpuidle: Move polling state initialization code to separate file
  cpuidle: Eliminate the CPUIDLE_DRIVER_STATE_START symbol
  cpuidle: Convert to using %pOF instead of full_name
parents ab271bc9 1b39e3f8
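
The net effect of the first three patches in this series is that the polling state is no longer created behind a driver's back at a compile-time offset: a driver that wants it now calls cpuidle_poll_state_init() for state 0 and places its hardware states after it. Below is a minimal sketch of that driver-side pattern, assuming a hypothetical "example_idle" driver; the names and registration flow are illustrative only and not part of the patch.

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical driver used only to illustrate the new pattern. */
static struct cpuidle_driver example_idle_driver = {
	.name  = "example_idle",
	.owner = THIS_MODULE,
};

static int __init example_idle_init(void)
{
	struct cpuidle_driver *drv = &example_idle_driver;
	int count = 0;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);	/* fills drv->states[0] with the POLL state */
		count = 1;			/* hardware states follow the polling state */
	}

	/* ... fill drv->states[count], drv->states[count + 1], ... here ... */

	drv->state_count = count;
	return cpuidle_register_driver(drv);
}
device_initcall(example_idle_init);

Without CONFIG_ARCH_HAS_CPU_RELAX the cpuidle_poll_state_init() call compiles to a no-op stub (see the include/linux/cpuidle.h hunk below) and the real states simply start at index 0.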
@@ -48,6 +48,8 @@
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
 
+#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
+
 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
 module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
@@ -761,7 +763,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 	if (cx->type != ACPI_STATE_C1) {
 		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
-			index = CPUIDLE_DRIVER_STATE_START;
+			index = ACPI_IDLE_STATE_START;
 			cx = per_cpu(acpi_cstate[index], dev->cpu);
 		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
 			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
@@ -813,7 +815,7 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 					   struct cpuidle_device *dev)
 {
-	int i, count = CPUIDLE_DRIVER_STATE_START;
+	int i, count = ACPI_IDLE_STATE_START;
 	struct acpi_processor_cx *cx;
 
 	if (max_cstate == 0)
@@ -840,7 +842,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 {
-	int i, count = CPUIDLE_DRIVER_STATE_START;
+	int i, count;
 	struct acpi_processor_cx *cx;
 	struct cpuidle_state *state;
 	struct cpuidle_driver *drv = &acpi_idle_driver;
@@ -848,6 +850,13 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 	if (max_cstate == 0)
 		max_cstate = 1;
 
+	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
+		cpuidle_poll_state_init(drv);
+		count = 1;
+	} else {
+		count = 0;
+	}
+
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
@@ -1291,7 +1300,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 		return -EINVAL;
 
 	drv->safe_state_index = -1;
-	for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
 		drv->states[i].name[0] = '\0';
 		drv->states[i].desc[0] = '\0';
 	}
......
@@ -5,6 +5,7 @@
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
+obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o
 
 ##################################################################################
 # ARM SoC drivers
......
@@ -179,36 +179,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 	}
 }
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int __cpuidle poll_idle(struct cpuidle_device *dev,
-			       struct cpuidle_driver *drv, int index)
-{
-	local_irq_enable();
-	if (!current_set_polling_and_test()) {
-		while (!need_resched())
-			cpu_relax();
-	}
-	current_clr_polling();
-
-	return index;
-}
-
-static void poll_idle_init(struct cpuidle_driver *drv)
-{
-	struct cpuidle_state *state = &drv->states[0];
-
-	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
-	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
-	state->exit_latency = 0;
-	state->target_residency = 0;
-	state->power_usage = -1;
-	state->enter = poll_idle;
-	state->disabled = false;
-}
-#else
-static void poll_idle_init(struct cpuidle_driver *drv) {}
-#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */
-
 /**
  * __cpuidle_register_driver: register the driver
  * @drv: a valid pointer to a struct cpuidle_driver
@@ -246,8 +216,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
 		on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
 				 (void *)1, 1);
 
-	poll_idle_init(drv);
-
 	return 0;
 }
......
@@ -53,16 +53,16 @@ static int init_state_node(struct cpuidle_state *idle_state,
 	err = of_property_read_u32(state_node, "entry-latency-us",
 				   &entry_latency);
 	if (err) {
-		pr_debug(" * %s missing entry-latency-us property\n",
-			 state_node->full_name);
+		pr_debug(" * %pOF missing entry-latency-us property\n",
+			 state_node);
 		return -EINVAL;
 	}
 
 	err = of_property_read_u32(state_node, "exit-latency-us",
 				   &exit_latency);
 	if (err) {
-		pr_debug(" * %s missing exit-latency-us property\n",
-			 state_node->full_name);
+		pr_debug(" * %pOF missing exit-latency-us property\n",
+			 state_node);
 		return -EINVAL;
 	}
 
 	/*
@@ -75,8 +75,8 @@ static int init_state_node(struct cpuidle_state *idle_state,
 	err = of_property_read_u32(state_node, "min-residency-us",
 				   &idle_state->target_residency);
 	if (err) {
-		pr_debug(" * %s missing min-residency-us property\n",
-			 state_node->full_name);
+		pr_debug(" * %pOF missing min-residency-us property\n",
+			 state_node);
 		return -EINVAL;
 	}
 
@@ -186,8 +186,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 		}
 
 		if (!idle_state_valid(state_node, i, cpumask)) {
-			pr_warn("%s idle state not valid, bailing out\n",
-				state_node->full_name);
+			pr_warn("%pOF idle state not valid, bailing out\n",
+				state_node);
 			err = -EINVAL;
 			break;
 		}
@@ -200,8 +200,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 		idle_state = &drv->states[state_idx++];
 		err = init_state_node(idle_state, matches, state_node);
 		if (err) {
-			pr_err("Parsing idle state node %s failed with err %d\n",
-			       state_node->full_name, err);
+			pr_err("Parsing idle state node %pOF failed with err %d\n",
+			       state_node, err);
 			err = -EINVAL;
 			break;
 		}
......
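
For reference, the %pOF conversions above rely on the printk format extension for device tree nodes, which prints the full path of a struct device_node without dereferencing it. A tiny hedged sketch of the before/after pattern; the helper name is made up for illustration and is not part of the patch.

#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative helper, not part of the patch. */
static void report_idle_node(const struct device_node *state_node)
{
	/* Old style: dereference the node to get its path string. */
	pr_debug("idle state node %s\n", state_node->full_name);

	/* New style: pass the node itself and let %pOF print its path. */
	pr_debug("idle state node %pOF\n", state_node);
}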
@@ -69,6 +69,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
 	struct ladder_device_state *last_state;
 	int last_residency, last_idx = ldev->last_state_idx;
+	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 
 	/* Special case when user has set very strict latency requirement */
@@ -96,13 +97,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 	}
 
 	/* consider demotion */
-	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+	if (last_idx > first_idx &&
 	    (drv->states[last_idx].disabled ||
 	     dev->states_usage[last_idx].disable ||
 	     drv->states[last_idx].exit_latency > latency_req)) {
 		int i;
 
-		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+		for (i = last_idx - 1; i > first_idx; i--) {
 			if (drv->states[i].exit_latency <= latency_req)
 				break;
 		}
@@ -110,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 		return i;
 	}
 
-	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+	if (last_idx > first_idx &&
 	    last_residency < last_state->threshold.demotion_time) {
 		last_state->stats.demotion_count++;
 		last_state->stats.promotion_count = 0;
@@ -133,13 +134,14 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
 			       struct cpuidle_device *dev)
 {
 	int i;
+	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
 	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
 	struct ladder_device_state *lstate;
 	struct cpuidle_state *state;
 
-	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+	ldev->last_state_idx = first_idx;
 
-	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+	for (i = first_idx; i < drv->state_count; i++) {
 		state = &drv->states[i];
 		lstate = &ldev->states[i];
@@ -151,7 +153,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
 		if (i < drv->state_count - 1)
 			lstate->threshold.promotion_time = state->exit_latency;
 
-		if (i > CPUIDLE_DRIVER_STATE_START)
+		if (i > first_idx)
 			lstate->threshold.demotion_time = state->exit_latency;
 	}
......
@@ -324,8 +324,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	expected_interval = get_typical_interval(data);
 	expected_interval = min(expected_interval, data->next_timer_us);
 
-	if (CPUIDLE_DRIVER_STATE_START > 0) {
-		struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+	first_idx = 0;
+	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
+		struct cpuidle_state *s = &drv->states[1];
 		unsigned int polling_threshold;
 
 		/*
@@ -336,12 +337,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 		polling_threshold = max_t(unsigned int, 20, s->target_residency);
 		if (data->next_timer_us > polling_threshold &&
 		    latency_req > s->exit_latency && !s->disabled &&
-		    !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
-			first_idx = CPUIDLE_DRIVER_STATE_START;
-		else
-			first_idx = CPUIDLE_DRIVER_STATE_START - 1;
-	} else {
-		first_idx = 0;
+		    !dev->states_usage[1].disable)
+			first_idx = 1;
 	}
 
 	/*
......
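
The ladder and menu changes above share one idea: the index of the first "real" (non-polling) state is now derived at run time from the new CPUIDLE_FLAG_POLLING flag on state 0, instead of the compile-time CPUIDLE_DRIVER_STATE_START constant. A minimal sketch distilling that check; the helper name is illustrative, the governors open-code the expression.

#include <linux/cpuidle.h>

/* Illustrative helper, not part of the patch. */
static int first_nonpolling_index(const struct cpuidle_driver *drv)
{
	return (drv->states[0].flags & CPUIDLE_FLAG_POLLING) ? 1 : 0;
}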
+/*
+ * poll_state.c - Polling idle state
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/sched.h>
+#include <linux/sched/idle.h>
+
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+			       struct cpuidle_driver *drv, int index)
+{
+	local_irq_enable();
+	if (!current_set_polling_and_test()) {
+		while (!need_resched())
+			cpu_relax();
+	}
+	current_clr_polling();
+
+	return index;
+}
+
+void cpuidle_poll_state_init(struct cpuidle_driver *drv)
+{
+	struct cpuidle_state *state = &drv->states[0];
+
+	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+	state->exit_latency = 0;
+	state->target_residency = 0;
+	state->power_usage = -1;
+	state->enter = poll_idle;
+	state->disabled = false;
+	state->flags = CPUIDLE_FLAG_POLLING;
+}
+EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
@@ -1331,6 +1331,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
 	intel_idle_state_table_update();
 
+	cpuidle_poll_state_init(drv);
 	drv->state_count = 1;
 
 	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
......
@@ -63,6 +63,7 @@ struct cpuidle_state {
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_NONE       (0x00)
+#define CPUIDLE_FLAG_POLLING	(0x01) /* polling state */
 #define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
 
@@ -224,6 +225,12 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
 }
 #endif
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+void cpuidle_poll_state_init(struct cpuidle_driver *drv);
+#else
+static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
+#endif
+
 /******************************
  * CPUIDLE GOVERNOR INTERFACE *
  ******************************/
@@ -250,12 +257,6 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
 #endif
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-#define CPUIDLE_DRIVER_STATE_START	1
-#else
-#define CPUIDLE_DRIVER_STATE_START	0
-#endif
-
 #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
 ({								\
 	int __ret;						\
......