Commit c786e405 authored by Linus Torvalds

Merge tag 'acpi-urgent-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI fixes from Rafael Wysocki:
 "These fix a recent ACPICA regression affecting control method
  execution at the table level and an earlier hibernation regression in
  the ACPI driver for Intel SoCs (LPSS) that was missed by a previous
  fix in this cycle.

  Specifics:

   - Fix a recent ACPICA regression introduced by a previous fix that
     caused control method execution at the table level to be mishandled
     by mistake (Erik Schmauss).

   - Fix a hibernation regression from the 4.15 cycle in the ACPI driver
     for Intel SoCs (LPSS) where the driver's PM quirks confused the
     platform firmware during resume from hibernation; this was addressed
     for system-wide suspend/resume (ACPI S3) earlier in this cycle, but
     that previous fix missed the hibernation (ACPI S4) case (Rafael
     Wysocki)"

* tag 'acpi-urgent-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPICA: AML Parser: ignore control method status in module-level code
  ACPI / LPSS: Avoid PM quirks on suspend and resume from hibernation
parents f67077de 5f95d39b
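
As context for the LPSS part of the diff below, here is a minimal, hypothetical sketch of the approach the fix takes (illustration only, not the driver's actual code; all *_sketch names are invented): apply the IOSF D3 power quirk only when acpi_target_system_state() reports ACPI_STATE_S0 (runtime PM or suspend-to-idle, where the platform firmware does not drive the transition), and remember that the quirk was applied so the resume path can skip undoing it otherwise:

/* Hypothetical illustration only; *_sketch names are invented. */
#include <linux/acpi.h>
#include <linux/types.h>

static bool d3_quirk_applied_sketch;

/* Suspend side (late suspend or runtime suspend). */
static void lpss_d3_quirk_suspend_sketch(void)
{
	/*
	 * acpi_target_system_state() is ACPI_STATE_S0 for runtime PM and
	 * suspend-to-idle; for S3 suspend and S4 hibernation it reports the
	 * target sleep state, so the quirk is skipped on firmware-assisted
	 * transitions (the point of the hibernation fix).
	 */
	if (acpi_target_system_state() != ACPI_STATE_S0)
		return;

	/* ...put the LPSS DMA controllers into D3 via IOSF here... */
	d3_quirk_applied_sketch = true;
}

/* Resume side. */
static void lpss_d3_quirk_resume_sketch(void)
{
	/* Undo the quirk only if the suspend side actually applied it. */
	if (!d3_quirk_applied_sketch)
		return;

	d3_quirk_applied_sketch = false;
	/* ...power the LPSS DMA controllers back up via IOSF here... */
}
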
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP		BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;
 
 static void lpss_iosf_enter_d3_state(void)
 {
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
 			LPSS_IOSF_GPIODEF0, value1, mask1);
 
+	lpss_iosf_d3_entered = true;
+
 exit:
 	mutex_unlock(&lpss_iosf_mutex);
 }
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
 
 	mutex_lock(&lpss_iosf_mutex);
 
+	if (!lpss_iosf_d3_entered)
+		goto exit;
+
+	lpss_iosf_d3_entered = false;
+
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
 			LPSS_IOSF_GPIODEF0, value1, mask1);
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
 			LPSS_IOSF_PMCSR, value2, mask2);
 
+exit:
 	mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool runtime)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
-	bool wakeup = runtime || device_may_wakeup(dev);
 	int ret;
 
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
 	 * wrong status for devices being about to be powered off. See
 	 * lpss_iosf_enter_d3_state() for further information.
 	 */
-	if ((runtime || !pm_suspend_via_firmware()) &&
+	if (acpi_target_system_state() == ACPI_STATE_S0 &&
 	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_enter_d3_state();
 
 	return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev, bool runtime)
+static int acpi_lpss_resume(struct device *dev)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
 	 * This call is kept first to be in symmetry with
 	 * acpi_lpss_runtime_suspend() one.
 	 */
-	if ((runtime || !pm_resume_via_firmware()) &&
-	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_exit_d3_state();
 
 	ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
 		return 0;
 
 	ret = pm_generic_suspend_late(dev);
-	return ret ? ret : acpi_lpss_suspend(dev, false);
+	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev, false);
+	int ret = acpi_lpss_resume(dev);
 
 	return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev, true);
+	int ret = acpi_lpss_resume(dev);
 
 	return ret ? ret : pm_generic_runtime_resume(dev);
 }
@@ -709,15 +709,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 			} else
 			    if ((walk_state->
 				 parse_flags & ACPI_PARSE_MODULE_LEVEL)
+				&& status != AE_CTRL_TRANSFER
 				&& ACPI_FAILURE(status)) {
 				/*
-				 * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
-				 * executing it as a control method. However, if we encounter
-				 * an error while loading the table, we need to keep trying to
-				 * load the table rather than aborting the table load. Set the
-				 * status to AE_OK to proceed with the table load. If we get a
-				 * failure at this point, it means that the dispatcher got an
-				 * error while processing Op (most likely an AML operand error.
+				 * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
+				 * loading a table by executing it as a control method.
+				 * However, if we encounter an error while loading the table,
+				 * we need to keep trying to load the table rather than
+				 * aborting the table load (setting the status to AE_OK
+				 * continues the table load). If we get a failure at this
+				 * point, it means that the dispatcher got an error while
+				 * processing Op (most likely an AML operand error) or a
+				 * control method was called from module level and the
+				 * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
+				 * leave the status alone, there's nothing wrong with it.
 				 */
 				status = AE_OK;
 			}