Commit 7b01463e authored by Rafael J. Wysocki

Merge branch 'pm-sleep'

* pm-sleep:
  ACPI / PM: Check low power idle constraints for debug only
  PM / s2idle: Rename platform operations structure
  PM / s2idle: Rename ->enter_freeze to ->enter_s2idle
  PM / s2idle: Rename freeze_state enum and related items
  PM / s2idle: Rename PM_SUSPEND_FREEZE to PM_SUSPEND_TO_IDLE
  ACPI / PM: Prefer suspend-to-idle over S3 on some systems
  platform/x86: intel-hid: Wake up Dell Latitude 7275 from suspend-to-idle
  PM / suspend: Define pr_fmt() in suspend.c
  PM / suspend: Use mem_sleep_labels[] strings in messages
  PM / sleep: Put pm_test under CONFIG_PM_SLEEP_DEBUG
  PM / sleep: Check pm_wakeup_pending() in __device_suspend_noirq()
  PM / core: Add error argument to dpm_show_time()
  PM / core: Split dpm_suspend_noirq() and dpm_resume_noirq()
  PM / s2idle: Rearrange the main suspend-to-idle loop
  PM / timekeeping: Print debug messages when requested
  PM / sleep: Mark suspend/hibernation start and finish
  PM / sleep: Do not print debug messages by default
  PM / suspend: Export pm_suspend_target_state
parents a1b5fd8f 726fb6b4
......@@ -273,3 +273,15 @@ Description:
This output is useful for system wakeup diagnostics of spurious
wakeup interrupts.
What: /sys/power/pm_debug_messages
Date: July 2017
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_debug_messages file controls the printing
of debug messages from the system suspend/hibernation
infrastructure to the kernel log.
Writing a "1" to this file enables the debug messages and
writing a "0" (default) to it disables them. Reads from
this file return the current value.
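
As a purely illustrative aside (not part of the patch set), a userspace program could exercise the attribute documented above like this; only the sysfs path and the 0/1 semantics come from the description, the rest is example code:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/power/pm_debug_messages", "w");

		if (!f)
			return 1;
		fputs("1\n", f);	/* writing "0" disables the messages again */
		fclose(f);

		f = fopen("/sys/power/pm_debug_messages", "r");
		if (f) {
			int c = fgetc(f);	/* reads back '1' once enabled */

			printf("pm_debug_messages = %c\n", c);
			fclose(f);
		}
		return 0;
	}
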
......@@ -35,7 +35,9 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
The default suspend mode (ie. the one to be used without writing anything into
/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
"s2idle", but it can be overridden by the value of the "mem_sleep_default"
parameter in the kernel command line.
parameter in the kernel command line. On some ACPI-based systems, depending on
the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
is supported.
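
As an illustrative sketch only (not part of the patch), the effect of this default selection, or of a mem_sleep_default= override on the kernel command line, can be checked from userspace by reading /sys/power/mem_sleep, which marks the currently selected mode in square brackets:

	#include <stdio.h>

	int main(void)
	{
		char line[64];
		FILE *f = fopen("/sys/power/mem_sleep", "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			printf("mem_sleep: %s", line);	/* e.g. "s2idle [deep]" */
		fclose(f);
		return 0;
	}
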
The properties of all of the sleep states are described below.
......
......@@ -60,7 +60,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
return index;
}
static void tegra114_idle_enter_freeze(struct cpuidle_device *dev,
static void tegra114_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
......@@ -77,7 +77,7 @@ static struct cpuidle_driver tegra_idle_driver = {
#ifdef CONFIG_PM_SLEEP
[1] = {
.enter = tegra114_idle_power_down,
.enter_freeze = tegra114_idle_enter_freeze,
.enter_s2idle = tegra114_idle_enter_s2idle,
.exit_latency = 500,
.target_residency = 1000,
.flags = CPUIDLE_FLAG_TIMER_STOP,
......
......@@ -791,7 +791,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
......@@ -876,14 +876,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
drv->safe_state_index = count;
}
/*
* Halt-induced C1 is not good for ->enter_freeze, because it
* Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
* particularly interesting from the suspend-to-idle angle, so
* avoid C1 and the situations in which we may need to fall back
* to it altogether.
*/
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
state->enter_freeze = acpi_idle_enter_freeze;
state->enter_s2idle = acpi_idle_enter_s2idle;
count++;
if (count == CPUIDLE_STATE_MAX)
......
......@@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
#define ACPI_LPS0_SCREEN_OFF 3
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
......@@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
/* Device constraint entry structure */
struct lpi_device_info {
char *name;
int enabled;
union acpi_object *package;
};
/* Constraint package structure */
struct lpi_device_constraint {
int uid;
int min_dstate;
int function_states;
};
struct lpi_constraints {
acpi_handle handle;
int min_dstate;
};
static struct lpi_constraints *lpi_constraints_table;
static int lpi_constraints_table_size;
static void lpi_device_get_constraints(void)
{
union acpi_object *out_obj;
int i;
out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
NULL, ACPI_TYPE_PACKAGE);
acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
out_obj ? "successful" : "failed");
if (!out_obj)
return;
lpi_constraints_table = kcalloc(out_obj->package.count,
sizeof(*lpi_constraints_table),
GFP_KERNEL);
if (!lpi_constraints_table)
goto free_acpi_buffer;
acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
for (i = 0; i < out_obj->package.count; i++) {
struct lpi_constraints *constraint;
acpi_status status;
union acpi_object *package = &out_obj->package.elements[i];
struct lpi_device_info info = { };
int package_count = 0, j;
if (!package)
continue;
for (j = 0; j < package->package.count; ++j) {
union acpi_object *element =
&(package->package.elements[j]);
switch (element->type) {
case ACPI_TYPE_INTEGER:
info.enabled = element->integer.value;
break;
case ACPI_TYPE_STRING:
info.name = element->string.pointer;
break;
case ACPI_TYPE_PACKAGE:
package_count = element->package.count;
info.package = element->package.elements;
break;
}
}
if (!info.enabled || !info.package || !info.name)
continue;
constraint = &lpi_constraints_table[lpi_constraints_table_size];
status = acpi_get_handle(NULL, info.name, &constraint->handle);
if (ACPI_FAILURE(status))
continue;
acpi_handle_debug(lps0_device_handle,
"index:%d Name:%s\n", i, info.name);
constraint->min_dstate = -1;
for (j = 0; j < package_count; ++j) {
union acpi_object *info_obj = &info.package[j];
union acpi_object *cnstr_pkg;
union acpi_object *obj;
struct lpi_device_constraint dev_info;
switch (info_obj->type) {
case ACPI_TYPE_INTEGER:
/* version */
break;
case ACPI_TYPE_PACKAGE:
if (info_obj->package.count < 2)
break;
cnstr_pkg = info_obj->package.elements;
obj = &cnstr_pkg[0];
dev_info.uid = obj->integer.value;
obj = &cnstr_pkg[1];
dev_info.min_dstate = obj->integer.value;
acpi_handle_debug(lps0_device_handle,
"uid:%d min_dstate:%s\n",
dev_info.uid,
acpi_power_state_string(dev_info.min_dstate));
constraint->min_dstate = dev_info.min_dstate;
break;
}
}
if (constraint->min_dstate < 0) {
acpi_handle_debug(lps0_device_handle,
"Incomplete constraint defined\n");
continue;
}
lpi_constraints_table_size++;
}
acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
free_acpi_buffer:
ACPI_FREE(out_obj);
}
static void lpi_check_constraints(void)
{
int i;
for (i = 0; i < lpi_constraints_table_size; ++i) {
struct acpi_device *adev;
if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
continue;
acpi_handle_debug(adev->handle,
"LPI: required min power state:%s current power state:%s\n",
acpi_power_state_string(lpi_constraints_table[i].min_dstate),
acpi_power_state_string(adev->power.state));
if (!adev->flags.power_manageable) {
acpi_handle_info(adev->handle, "LPI: Device not power manageble\n");
continue;
}
if (adev->power.state < lpi_constraints_table[i].min_dstate)
acpi_handle_info(adev->handle,
"LPI: Constraint not met; min power state:%s current power state:%s\n",
acpi_power_state_string(lpi_constraints_table[i].min_dstate),
acpi_power_state_string(adev->power.state));
}
}
static void acpi_sleep_run_lps0_dsm(unsigned int func)
{
union acpi_object *out_obj;
......@@ -714,6 +875,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
/*
* Use suspend-to-idle by default if the default
* suspend mode was not set from the command line.
*/
if (mem_sleep_default > PM_SUSPEND_MEM)
mem_sleep_current = PM_SUSPEND_TO_IDLE;
}
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
......@@ -723,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev,
"_DSM function 0 evaluation failed\n");
}
ACPI_FREE(out_obj);
lpi_device_get_constraints();
return 0;
}
......@@ -731,14 +901,14 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
static int acpi_freeze_begin(void)
static int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
s2idle_in_progress = true;
return 0;
}
static int acpi_freeze_prepare(void)
static int acpi_s2idle_prepare(void)
{
if (lps0_device_handle) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
......@@ -758,8 +928,12 @@ static int acpi_freeze_prepare(void)
return 0;
}
static void acpi_freeze_wake(void)
static void acpi_s2idle_wake(void)
{
if (pm_debug_messages_on)
lpi_check_constraints();
/*
* If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
* that the SCI has triggered while suspended, so cancel the wakeup in
......@@ -772,7 +946,7 @@ static void acpi_freeze_wake(void)
}
}
static void acpi_freeze_sync(void)
static void acpi_s2idle_sync(void)
{
/*
* Process all pending events in case there are any wakeup ones.
......@@ -785,7 +959,7 @@ static void acpi_freeze_sync(void)
s2idle_wakeup = false;
}
static void acpi_freeze_restore(void)
static void acpi_s2idle_restore(void)
{
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
......@@ -798,19 +972,19 @@ static void acpi_freeze_restore(void)
}
}
static void acpi_freeze_end(void)
static void acpi_s2idle_end(void)
{
s2idle_in_progress = false;
acpi_scan_lock_release();
}
static const struct platform_freeze_ops acpi_freeze_ops = {
.begin = acpi_freeze_begin,
.prepare = acpi_freeze_prepare,
.wake = acpi_freeze_wake,
.sync = acpi_freeze_sync,
.restore = acpi_freeze_restore,
.end = acpi_freeze_end,
static const struct platform_s2idle_ops acpi_s2idle_ops = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
.wake = acpi_s2idle_wake,
.sync = acpi_s2idle_sync,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
static void acpi_sleep_suspend_setup(void)
......@@ -825,7 +999,7 @@ static void acpi_sleep_suspend_setup(void)
&acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_scan_add_handler(&lps0_handler);
freeze_set_ops(&acpi_freeze_ops);
s2idle_set_ops(&acpi_s2idle_ops);
}
#else /* !CONFIG_SUSPEND */
......
......@@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
dev_name(dev), pm_verb(state.event), info, error);
}
#ifdef CONFIG_PM_DEBUG
static void dpm_show_time(ktime_t starttime, pm_message_t state,
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
const char *info)
{
ktime_t calltime;
......@@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state,
usecs = usecs64;
if (usecs == 0)
usecs = 1;
pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
info ?: "", info ? " " : "", pm_verb(state.event),
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
info ?: "", info ? " " : "", pm_verb(state.event),
error ? "aborted" : "complete",
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
#else
static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
const char *info) {}
#endif /* CONFIG_PM_DEBUG */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
pm_message_t state, const char *info)
......@@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
put_device(dev);
}
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Call the "noirq" resume handlers for all devices in dpm_noirq_list and
* enable device drivers to receive interrupts.
*/
void dpm_resume_noirq(pm_message_t state)
void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
......@@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
dpm_show_time(starttime, state, 0, "noirq");
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
void dpm_noirq_end(void)
{
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
* allow device drivers' interrupt handlers to be called.
*/
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
dpm_noirq_end();
}
/**
......@@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, "early");
dpm_show_time(starttime, state, 0, "early");
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
......@@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
......@@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
goto Complete;
}
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
......@@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev)
return __device_suspend_noirq(dev, pm_transition, false);
}
/**
* dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
void dpm_noirq_begin(void)
{
cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
}
int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
......@@ -1208,14 +1217,31 @@ int dpm_suspend_noirq(pm_message_t state)
if (error) {
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_resume_noirq(resume_event(state));
} else {
dpm_show_time(starttime, state, "noirq");
}
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
/**
* dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers' interrupt handlers from being called and invoke
* "noirq" suspend callbacks for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
{
int ret;
dpm_noirq_begin();
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
return ret;
}
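
The reason for splitting the "noirq" phase into dpm_noirq_begin()/dpm_noirq_suspend_devices() and dpm_noirq_resume_devices()/dpm_noirq_end() is that the rearranged suspend-to-idle loop elsewhere in this series can re-run the device part repeatedly without toggling interrupts and wake IRQs each time. The following is only a sketch of such a caller, with a hypothetical example_enter_idle() standing in for the actual idle entry; it is not taken from this merge's kernel/power/suspend.c changes, which are not shown expanded here:

	#include <linux/pm.h>
	#include <linux/suspend.h>

	static void example_enter_idle(void)
	{
		/* hypothetical placeholder for idling the CPUs */
	}

	static void example_noirq_pass(void)
	{
		int error;

		dpm_noirq_begin();	/* pause cpuidle, arm wake IRQs, suspend device IRQs */

		error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
		if (!error)
			example_enter_idle();

		dpm_noirq_resume_devices(PMSG_RESUME);
		dpm_noirq_end();	/* resume device IRQs, disarm wake IRQs, resume cpuidle */
	}
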
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
......@@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state)
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
} else {
dpm_show_time(starttime, state, "late");
}
dpm_show_time(starttime, state, error, "late");
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
......@@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state)
if (error) {
suspend_stats.failed_suspend++;
dpm_save_failed_step(SUSPEND_SUSPEND);
} else
dpm_show_time(starttime, state, NULL);
}
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
......
......@@ -865,7 +865,7 @@ bool pm_wakeup_pending(void)
void pm_system_wakeup(void)
{
atomic_inc(&pm_abort_suspend);
freeze_wake();
s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
......
......@@ -77,7 +77,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
unsigned int max_latency,
unsigned int forbidden_flags,
bool freeze)
bool s2idle)
{
unsigned int latency_req = 0;
int i, ret = 0;
......@@ -89,7 +89,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
if (s->disabled || su->disable || s->exit_latency <= latency_req
|| s->exit_latency > max_latency
|| (s->flags & forbidden_flags)
|| (freeze && !s->enter_freeze))
|| (s2idle && !s->enter_s2idle))
continue;
latency_req = s->exit_latency;
......@@ -128,7 +128,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
static void enter_s2idle_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
/*
......@@ -143,7 +143,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
drv->states[index].enter_freeze(dev, drv, index);
drv->states[index].enter_s2idle(dev, drv, index);
WARN_ON(!irqs_disabled());
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
......@@ -155,25 +155,25 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
}
/**
* cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
* cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
*
* If there are states with the ->enter_freeze callback, find the deepest of
* If there are states with the ->enter_s2idle callback, find the deepest of
* them and enter it with frozen tick.
*/
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
int index;
/*
* Find the deepest state with ->enter_freeze present, which guarantees
* Find the deepest state with ->enter_s2idle present, which guarantees
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
if (index > 0)
enter_freeze_proper(drv, dev, index);
enter_s2idle_proper(drv, dev, index);
return index;
}
......
......@@ -41,9 +41,9 @@ static int init_state_node(struct cpuidle_state *idle_state,
/*
* Since this is not a "coupled" state, it's safe to assume interrupts
* won't be enabled when it exits allowing the tick to be frozen
* safely. So enter() can be also enter_freeze() callback.
* safely. So enter() can be also enter_s2idle() callback.
*/
idle_state->enter_freeze = match_id->data;
idle_state->enter_s2idle = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);
......
This diff is collapsed.
......@@ -203,15 +203,26 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
acpi_status status;
if (priv->wakeup_mode) {
/*
* Needed for wakeup from suspend-to-idle to work on some
* platforms that don't expose the 5-button array, but still
* send notifies with the power button event code to this
* device object on power button actions while suspended.
*/
if (event == 0xce)
goto wakeup;
/* Wake up on 5-button array events only. */
if (event == 0xc0 || !priv->array)
return;
if (sparse_keymap_entry_from_scancode(priv->array, event))
pm_wakeup_hard_event(&device->dev);
else
if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
wakeup:
pm_wakeup_hard_event(&device->dev);
return;
}
......
......@@ -150,7 +150,7 @@ static void of_get_regulation_constraints(struct device_node *np,
suspend_state = &constraints->state_disk;
break;
case PM_SUSPEND_ON:
case PM_SUSPEND_FREEZE:
case PM_SUSPEND_TO_IDLE:
case PM_SUSPEND_STANDBY:
default:
continue;
......
......@@ -52,11 +52,11 @@ struct cpuidle_state {
int (*enter_dead) (struct cpuidle_device *dev, int index);
/*
* CPUs execute ->enter_freeze with the local tick or entire timekeeping
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
*/
void (*enter_freeze) (struct cpuidle_device *dev,
void (*enter_s2idle) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index);
};
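
To illustrate the renamed hook from a driver's point of view (the Tegra and ACPI hunks above show the real conversions), a minimal skeleton might look as follows; every name here is hypothetical and the callback bodies are stubs, the one hard requirement being that .enter_s2idle returns with interrupts still disabled:

	#include <linux/cpuidle.h>
	#include <linux/module.h>

	static int example_idle_enter(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
	{
		/* normal idle entry; may be interrupted as usual */
		return index;
	}

	static void example_idle_enter_s2idle(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv, int index)
	{
		/* suspend-to-idle entry; must not re-enable interrupts */
	}

	static struct cpuidle_driver example_idle_driver = {
		.name = "example_idle",
		.owner = THIS_MODULE,
		.states = {
			[0] = {
				.enter			= example_idle_enter,
				.enter_s2idle		= example_idle_enter_s2idle,
				.exit_latency		= 500,
				.target_residency	= 1000,
				.name			= "example",
				.desc			= "illustrative deep idle state",
			},
		},
		.state_count = 1,
	};

A real driver would register such a table with cpuidle_register().
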
......@@ -198,14 +198,14 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)
......
......@@ -689,6 +689,8 @@ struct dev_pm_domain {
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_noirq_resume_devices(pm_message_t state);
extern void dpm_noirq_end(void);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
......@@ -697,6 +699,8 @@ extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern void dpm_noirq_begin(void);
extern int dpm_noirq_suspend_devices(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
......
......@@ -33,10 +33,10 @@ static inline void pm_restore_console(void)
typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1)
#define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE
#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
enum suspend_stat_step {
......@@ -186,7 +186,7 @@ struct platform_suspend_ops {
void (*recover)(void);
};
struct platform_freeze_ops {
struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
void (*wake)(void);
......@@ -196,6 +196,9 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
......@@ -234,22 +237,22 @@ static inline bool pm_resume_via_firmware(void)
}
/* Suspend-to-idle state machine. */
enum freeze_state {
FREEZE_STATE_NONE, /* Not suspended/suspending. */
FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */
FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */
enum s2idle_states {
S2IDLE_STATE_NONE, /* Not suspended/suspending. */
S2IDLE_STATE_ENTER, /* Enter suspend-to-idle. */
S2IDLE_STATE_WAKE, /* Wake up from suspend-to-idle. */
};
extern enum freeze_state __read_mostly suspend_freeze_state;
extern enum s2idle_states __read_mostly s2idle_state;
static inline bool idle_should_freeze(void)
static inline bool idle_should_enter_s2idle(void)
{
return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
extern void __init pm_states_init(void);
extern void freeze_set_ops(const struct platform_freeze_ops *ops);
extern void freeze_wake(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);
/**
* arch_suspend_disable_irqs - disable IRQs for suspend
......@@ -281,10 +284,10 @@ static inline bool pm_resume_via_firmware(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool idle_should_freeze(void) { return false; }
static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
static inline void freeze_wake(void) {}
static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
/* struct pbe is used for creating lists of pages that should be restored
......@@ -427,6 +430,7 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
extern unsigned int pm_wakeup_irq;
extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
......@@ -491,10 +495,24 @@ static inline void unlock_system_sleep(void) {}
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
#else
#define pm_print_times_enabled (false)
#define pm_debug_messages_on (false)
#include <linux/printk.h>
#define __pm_pr_dbg(defer, fmt, ...) \
no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
#define pm_pr_dbg(fmt, ...) \
__pm_pr_dbg(false, fmt, ##__VA_ARGS__)
#define pm_deferred_pr_dbg(fmt, ...) \
__pm_pr_dbg(true, fmt, ##__VA_ARGS__)
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */
......
......@@ -651,7 +651,7 @@ static int load_image_and_restore(void)
int error;
unsigned int flags;
pr_debug("Loading hibernation image.\n");
pm_pr_dbg("Loading hibernation image.\n");
lock_device_hotplug();
error = create_basic_memory_bitmaps();
......@@ -681,7 +681,7 @@ int hibernate(void)
bool snapshot_test = false;
if (!hibernation_available()) {
pr_debug("Hibernation not available.\n");
pm_pr_dbg("Hibernation not available.\n");
return -EPERM;
}
......@@ -692,6 +692,7 @@ int hibernate(void)
goto Unlock;
}
pr_info("hibernation entry\n");
pm_prepare_console();
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
if (error) {
......@@ -727,7 +728,7 @@ int hibernate(void)
else
flags |= SF_CRC32_MODE;
pr_debug("Writing image.\n");
pm_pr_dbg("Writing image.\n");
error = swsusp_write(flags);
swsusp_free();
if (!error) {
......@@ -739,7 +740,7 @@ int hibernate(void)
in_suspend = 0;
pm_restore_gfp_mask();
} else {
pr_debug("Image restored successfully.\n");
pm_pr_dbg("Image restored successfully.\n");
}
Free_bitmaps:
......@@ -747,7 +748,7 @@ int hibernate(void)
Thaw:
unlock_device_hotplug();
if (snapshot_test) {
pr_debug("Checking hibernation image\n");
pm_pr_dbg("Checking hibernation image\n");
error = swsusp_check();
if (!error)
error = load_image_and_restore();
......@@ -762,6 +763,8 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
pr_info("hibernation exit\n");
return error;
}
......@@ -811,7 +814,7 @@ static int software_resume(void)
goto Unlock;
}
pr_debug("Checking hibernation image partition %s\n", resume_file);
pm_pr_dbg("Checking hibernation image partition %s\n", resume_file);
if (resume_delay) {
pr_info("Waiting %dsec before reading resume device ...\n",
......@@ -853,10 +856,10 @@ static int software_resume(void)
}
Check_image:
pr_debug("Hibernation image partition %d:%d present\n",
pm_pr_dbg("Hibernation image partition %d:%d present\n",
MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
pr_debug("Looking for hibernation image.\n");
pm_pr_dbg("Looking for hibernation image.\n");
error = swsusp_check();
if (error)
goto Unlock;
......@@ -868,6 +871,7 @@ static int software_resume(void)
goto Unlock;
}
pr_info("resume from hibernation\n");
pm_prepare_console();
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
if (error) {
......@@ -875,7 +879,7 @@ static int software_resume(void)
goto Close_Finish;
}
pr_debug("Preparing processes for restore.\n");
pm_pr_dbg("Preparing processes for restore.\n");
error = freeze_processes();
if (error)
goto Close_Finish;
......@@ -884,11 +888,12 @@ static int software_resume(void)
Finish:
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
pm_restore_console();
pr_info("resume from hibernation failed (%d)\n", error);
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
Unlock:
mutex_unlock(&pm_mutex);
pr_debug("Hibernation image not present or could not be loaded.\n");
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
swsusp_close(FMODE_READ);
......@@ -1012,8 +1017,8 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
error = -EINVAL;
if (!error)
pr_debug("Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
pm_pr_dbg("Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
unlock_system_sleep();
return error ? error : n;
}
......
......@@ -150,7 +150,7 @@ static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr
power_attr(mem_sleep);
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_PM_DEBUG
#ifdef CONFIG_PM_SLEEP_DEBUG
int pm_test_level = TEST_NONE;
static const char * const pm_tests[__TEST_AFTER_LAST] = {
......@@ -211,7 +211,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
}
power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
#endif /* CONFIG_PM_SLEEP_DEBUG */
#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
......@@ -361,6 +361,61 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
power_attr_ro(pm_wakeup_irq);
bool pm_debug_messages_on __read_mostly;
static ssize_t pm_debug_messages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", pm_debug_messages_on);
}
static ssize_t pm_debug_messages_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
pm_debug_messages_on = !!val;
return n;
}
power_attr(pm_debug_messages);
/**
* __pm_pr_dbg - Print a suspend debug message to the kernel log.
* @defer: Whether or not to use printk_deferred() to print the message.
* @fmt: Message format.
*
* The message will be emitted if enabled through the pm_debug_messages
* sysfs attribute.
*/
void __pm_pr_dbg(bool defer, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!pm_debug_messages_on)
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (defer)
printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
else
printk(KERN_DEBUG "PM: %pV", &vaf);
va_end(args);
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
......@@ -691,12 +746,11 @@ static struct attribute * g[] = {
&wake_lock_attr.attr,
&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
&pm_test_attr.attr,
&pm_print_times_attr.attr,
&pm_wakeup_irq_attr.attr,
&pm_debug_messages_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
......
......@@ -192,7 +192,6 @@ extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];
extern suspend_state_t mem_sleep_current;
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
......@@ -245,7 +244,11 @@ enum {
#define TEST_FIRST TEST_NONE
#define TEST_MAX (__TEST_AFTER_LAST - 1)
#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level (TEST_NONE)
#endif
#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
......
This diff is collapsed.
......@@ -104,9 +104,9 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
printk(info_test, pm_states[state]);
status = pm_suspend(state);
if (status < 0)
state = PM_SUSPEND_FREEZE;
state = PM_SUSPEND_TO_IDLE;
}
if (state == PM_SUSPEND_FREEZE) {
if (state == PM_SUSPEND_TO_IDLE) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
}
......
......@@ -158,7 +158,7 @@ static void cpuidle_idle_call(void)
}
/*
* Suspend-to-idle ("freeze") is a system state in which all user space
* Suspend-to-idle ("s2idle") is a system state in which all user space
* has been frozen, all I/O devices have been suspended and the only
* activity happens here and in interrupts (if any). In that case bypass
* the cpuidle governor and go straight for the deepest idle state
......@@ -167,9 +167,9 @@ static void cpuidle_idle_call(void)
* until a proper wakeup interrupt happens.
*/
if (idle_should_freeze() || dev->use_deepest_state) {
if (idle_should_freeze()) {
entered_state = cpuidle_enter_freeze(drv, dev);
if (idle_should_enter_s2idle() || dev->use_deepest_state) {
if (idle_should_enter_s2idle()) {
entered_state = cpuidle_enter_s2idle(drv, dev);
if (entered_state > 0) {
local_irq_enable();
goto exit_idle;
......
......@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/time.h>
#include "timekeeping_internal.h"
......@@ -75,7 +76,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t)
int bin = min(fls(t->tv_sec), NUM_BINS-1);
sleep_time_bin[bin]++;
printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n",
(s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
pm_deferred_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n",
(s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
}