Commit 2a893f91 authored by Linus Torvalds

Merge tag 'pm+acpi-for-3.8-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael Wysocki:

 - Removal of some ACPICA code that the kernel will never use from Lv
   Zheng.

 - APEI fix from Adrian Huang.

 - Removal of unnecessary ACPI memory hotplug driver code from Liu
   Jinsong.

 - Minor ACPI power management fixes.

 - ACPI debug code fix from Joe Perches.

 - ACPI fix to make system bus device nodes get the right names.

 - PNP resources handling fixes from Witold Szczeponik.

 - cpuidle fix for a recent regression stalling boot on systems with
   great numbers of CPUs from Daniel Lezcano.

 - cpuidle fixes from Sivaram Nair.

 - intel_idle debug message fix from Youquan Song.

 - cpufreq build regression fix from Larry Finger.

 - cpufreq fix for an obscure initialization race related to statistics
   from Konstantin Khlebnikov.

 - cpufreq change disabling the Longhaul driver by default from Rafał
   Bilski.

 - PM core fix preventing device suspend errors from happening during
   system suspend due to obscure race conditions.

 - PM QoS local variable name cleanup.

* tag 'pm+acpi-for-3.8-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM: Move disabling/enabling runtime PM to late suspend/early resume
  PM / QoS: Rename local variable in dev_pm_qos_add_ancestor_request()
  ACPI / scan: Do not use dummy HID for system bus ACPI nodes
  cpufreq / governor: Fix problem with cpufreq_ondemand or cpufreq_conservative
  cpufreq / Longhaul: Disable driver by default
  cpufreq / stats: fix race between stats allocation and first usage
  cpuidle: fix lock contention in the idle path
  intel_idle: pr_debug information need separated
  cpuidle / coupled: fix ready counter decrement
  cpuidle: Fix finding state with min power_usage
  PNP: Handle IORESOURCE_BITS in resource allocation
  PNP: Simplify setting of resources
  ACPI / power: Remove useless message from device registering routine
  ACPI / glue: Update DBG macro to include KERN_DEBUG
  ACPI / PM: Do not apply ACPI_SUCCESS() to acpi_bus_get_device() result
  ACPI / memhotplug: remove redundant logic of acpi memory hotadd
  ACPI / APEI: Fix the returned value in erst_dbg_read
  ACPICA: Remove useless mini-C library.
parents 127aa930 f67ffa95
......@@ -642,12 +642,13 @@ out the following operations:
* During system suspend it calls pm_runtime_get_noresume() and
pm_runtime_barrier() for every device right before executing the
subsystem-level .suspend() callback for it. In addition to that it calls
pm_runtime_disable() for every device right after executing the
subsystem-level .suspend() callback for it.
__pm_runtime_disable() with 'false' as the second argument for every device
right before executing the subsystem-level .suspend_late() callback for it.
* During system resume it calls pm_runtime_enable() and pm_runtime_put_sync()
for every device right before and right after executing the subsystem-level
.resume() callback for it, respectively.
for every device right after executing the subsystem-level .resume_early()
callback and right after executing the subsystem-level .resume() callback
for it, respectively.
7. Generic subsystem callbacks
......
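To illustrate the ordering described in the documentation change above, here is a minimal, hypothetical driver sketch (not taken from this patch set): with the new behavior, runtime PM is already disabled when .suspend_late() runs and is re-enabled only after .resume_early() returns, so neither callback can race with a runtime-PM transition. The foo_* names and the context save/restore helpers are invented for the example.

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical helpers standing in for real context save/restore code. */
static int foo_save_context(struct device *dev) { return 0; }
static int foo_restore_context(struct device *dev) { return 0; }

static int foo_suspend_late(struct device *dev)
{
	/* The PM core has already called __pm_runtime_disable(dev, false),
	 * so no runtime resume can run concurrently with this callback. */
	return foo_save_context(dev);
}

static int foo_resume_early(struct device *dev)
{
	/* Runs before the PM core calls pm_runtime_enable() for the device. */
	return foo_restore_context(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend_late	= foo_suspend_late,
	.resume_early	= foo_resume_early,
};

In a real driver, foo_pm_ops would be referenced from the driver's .pm field; it is shown on its own here only to illustrate the callback ordering.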
......@@ -226,16 +226,6 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
struct acpi_memory_info *info;
int node;
/* Get the range from the _CRS */
result = acpi_memory_get_device_resources(mem_device);
if (result) {
dev_err(&mem_device->device->dev,
"get_device_resources failed\n");
mem_device->state = MEMORY_INVALID_STATE;
return result;
}
node = acpi_get_node(mem_device->device->handle);
/*
* Tell the VM there is more memory here...
......@@ -342,14 +332,6 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
break;
}
if (acpi_memory_check_device(mem_device))
break;
if (acpi_memory_enable_device(mem_device)) {
acpi_handle_err(handle,"Cannot enable memory device\n");
break;
}
ost_code = ACPI_OST_SC_SUCCESS;
break;
......
......@@ -162,5 +162,5 @@ acpi-y += \
utxferror.o \
utxfmutex.o
acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o utclib.o
acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o
This diff is collapsed.
......@@ -111,8 +111,17 @@ static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf,
if (rc)
goto out;
/* no more record */
if (id == APEI_ERST_INVALID_RECORD_ID)
if (id == APEI_ERST_INVALID_RECORD_ID) {
/*
* If the persistent store is empty initially, erst_read()
* below will return -ENOENT and control comes back to this
* 'retry_next' path. The value returned to user space should
* then be zero, indicating EOF for the read operation.
*/
len = 0;
goto out;
}
retry:
rc = len = erst_read(id, erst_dbg_buf, erst_dbg_buf_len);
/* The record may be cleared by others, try read next record */
......
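For context, a hedged userspace sketch of how the ERST debug interface is typically consumed; the device path /dev/erst_dbg and the buffer size are assumptions for the example, not part of the patch. With the fix above, read() reports EOF (returns 0) once no records remain, so the loop below terminates cleanly instead of the driver bouncing back to its retry path with a stale error.

/* Illustrative reader, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	static char buf[16384];	/* assumed large enough for one record */
	ssize_t n;
	int fd = open("/dev/erst_dbg", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/erst_dbg");
		return EXIT_FAILURE;
	}
	/* Each read() returns one error record; 0 now means "store empty". */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		printf("read ERST record of %zd bytes\n", n);
	close(fd);
	return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}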
......@@ -358,8 +358,7 @@ static struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
struct acpi_device *adev;
return handle && ACPI_SUCCESS(acpi_bus_get_device(handle, &adev)) ?
adev : NULL;
return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
}
/**
......
......@@ -18,9 +18,14 @@
#define ACPI_GLUE_DEBUG 0
#if ACPI_GLUE_DEBUG
#define DBG(x...) printk(PREFIX x)
#define DBG(fmt, ...) \
printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__)
#else
#define DBG(x...) do { } while(0)
#define DBG(fmt, ...) \
do { \
if (0) \
printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__); \
} while (0)
#endif
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
......
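A side note on the disabled-case macro: wrapping the printk() in "if (0)" keeps the format string and its arguments type-checked by the compiler even when ACPI_GLUE_DEBUG is 0, while the dead branch is optimized away. A minimal standalone sketch of the difference, with invented macro names and printf() standing in for printk():

/* Illustrative only; not kernel code. */
#include <stdio.h>

/* Swallows its arguments: a broken format string is never diagnosed. */
#define DBG_SWALLOW(fmt, ...)	do { } while (0)

/* Dead branch: arguments are still checked, then the call is removed. */
#define DBG_CHECKED(fmt, ...) \
	do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
	DBG_SWALLOW("count = %s\n", 42);	/* wrong %s goes unnoticed */
	DBG_CHECKED("count = %d\n", 42);	/* checked, emits no code */
	return 0;
}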
......@@ -445,11 +445,8 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
return -ENODEV;
ret = acpi_bus_get_device(handle, &acpi_dev);
if (ret)
goto no_power_resource;
if (!acpi_dev->power.flags.power_resources)
goto no_power_resource;
if (ret || !acpi_dev->power.flags.power_resources)
return -ENODEV;
powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
if (!powered_device)
......@@ -471,10 +468,6 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
}
return ret;
no_power_resource:
printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!\n");
return -ENODEV;
}
EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
......
......@@ -1346,7 +1346,7 @@ static void acpi_device_set_id(struct acpi_device *device)
acpi_add_id(device, ACPI_DOCK_HID);
else if (!acpi_ibm_smbus_match(device))
acpi_add_id(device, ACPI_SMBUS_IBM_HID);
else if (!acpi_device_hid(device) &&
else if (list_empty(&device->pnp.ids) &&
ACPI_IS_ROOT_DEVICE(device->parent)) {
acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
......
......@@ -513,6 +513,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
return error;
}
......@@ -589,8 +591,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_suspended)
goto Unlock;
pm_runtime_enable(dev);
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
......@@ -930,6 +930,8 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
__pm_runtime_disable(dev, false);
if (dev->power.syscore)
return 0;
......@@ -1133,11 +1135,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
complete_all(&dev->power.completion);
if (error)
async_error = error;
else if (dev->power.is_suspended)
__pm_runtime_disable(dev, false);
return error;
}
......
......@@ -542,19 +542,19 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req, s32 value)
{
struct device *ancestor = dev->parent;
int error = -ENODEV;
int ret = -ENODEV;
while (ancestor && !ancestor->power.ignore_children)
ancestor = ancestor->parent;
if (ancestor)
error = dev_pm_qos_add_request(ancestor, req,
ret = dev_pm_qos_add_request(ancestor, req,
DEV_PM_QOS_LATENCY, value);
if (error < 0)
if (ret < 0)
req->dev = NULL;
return error;
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
......
......@@ -20,6 +20,9 @@ if CPU_FREQ
config CPU_FREQ_TABLE
tristate
config CPU_FREQ_GOV_COMMON
bool
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
select CPU_FREQ_TABLE
......@@ -141,6 +144,7 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
select CPU_FREQ_TABLE
select CPU_FREQ_GOV_COMMON
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
The governor does a periodic polling and
......@@ -159,6 +163,7 @@ config CPU_FREQ_GOV_ONDEMAND
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
select CPU_FREQ_GOV_COMMON
help
'conservative' - this driver is rather similar to the 'ondemand'
governor both in its source code and its purpose, the difference is
......
......@@ -7,8 +7,9 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
......
......@@ -364,18 +364,21 @@ static int __init cpufreq_stats_init(void)
if (ret)
return ret;
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu)
cpufreq_update_policy(cpu);
ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
cpufreq_unregister_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
return ret;
}
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
cpufreq_update_policy(cpu);
}
return 0;
}
static void __exit cpufreq_stats_exit(void)
......
......@@ -77,7 +77,7 @@ static unsigned int longhaul_index;
static int scale_voltage;
static int disable_acpi_c3;
static int revid_errata;
static int enable;
/* Clock ratios multiplied by 10 */
static int mults[32];
......@@ -965,6 +965,10 @@ static int __init longhaul_init(void)
if (!x86_match_cpu(longhaul_id))
return -ENODEV;
if (!enable) {
printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
return -ENODEV;
}
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, "
......@@ -1021,6 +1025,10 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
* such. */
module_param(revid_errata, int, 0644);
MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
/* By default driver is disabled to prevent incompatible
* system freeze. */
module_param(enable, int, 0644);
MODULE_PARM_DESC(enable, "Enable driver");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
......
......@@ -209,7 +209,7 @@ inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
int all;
int ret;
all = coupled->online_count || (coupled->online_count << WAITING_BITS);
all = coupled->online_count | (coupled->online_count << WAITING_BITS);
ret = atomic_add_unless(&coupled->ready_waiting_counts,
-MAX_WAITING_CPUS, all);
......
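The one-character change above matters because ready_waiting_counts packs two CPU counts into a single word, one in the low bits and one shifted up by WAITING_BITS; the logical "||" collapsed that to 0 or 1 instead of setting both fields. A small illustrative sketch (the WAITING_BITS value of 16 is an assumption for the demo):

/* Illustrative only; not kernel code. */
#include <stdio.h>

#define WAITING_BITS 16	/* assumed split point for the demo */

int main(void)
{
	unsigned int online_count = 4;
	unsigned int logical = online_count || (online_count << WAITING_BITS);
	unsigned int bitwise = online_count |  (online_count << WAITING_BITS);

	printf("||: 0x%08x\n", logical);	/* 0x00000001: merely "true" */
	printf("| : 0x%08x\n", bitwise);	/* 0x00040004: both fields hold 4 */
	return 0;
}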
......@@ -70,7 +70,7 @@ int cpuidle_play_dead(void)
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int i, dead_state = -1;
int power_usage = -1;
int power_usage = INT_MAX;
if (!drv)
return -ENODEV;
......
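The change from -1 to INT_MAX matters because the loop that follows picks the state with the smallest power_usage using a less-than comparison; with a running minimum that starts at -1, no non-negative value can ever win and no state gets selected. A generic sketch of the pattern (not the driver's exact loop; the sample values are invented):

/* Illustrative minimum search; values invented for the example. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	int power_usage[] = { 30, 10, 20 };	/* per-state power figures */
	int min_power = INT_MAX;		/* starting at -1 would never be beaten */
	int best_state = -1;
	int i;

	for (i = 0; i < 3; i++) {
		if (power_usage[i] < min_power) {
			min_power = power_usage[i];
			best_state = i;
		}
	}
	printf("selected state %d (power %d)\n", best_state, min_power);
	return 0;
}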
......@@ -235,16 +235,10 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver);
*/
struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
{
struct cpuidle_driver *drv;
if (!dev)
return NULL;
spin_lock(&cpuidle_driver_lock);
drv = __cpuidle_get_cpu_driver(dev->cpu);
spin_unlock(&cpuidle_driver_lock);
return drv;
return __cpuidle_get_cpu_driver(dev->cpu);
}
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
......
......@@ -312,7 +312,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int power_usage = -1;
int power_usage = INT_MAX;
int i;
int multiplier;
struct timespec t;
......
......@@ -506,7 +506,7 @@ static int intel_idle_cpuidle_driver_init(void)
if (*cpuidle_state_table[cstate].name == '\0')
pr_debug(PREFIX "unaware of model 0x%x"
" MWAIT %d please"
" contact lenb@kernel.org",
" contact lenb@kernel.org\n",
boot_cpu_data.x86_model, cstate);
continue;
}
......
......@@ -298,6 +298,39 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
return ret;
}
static char *pnp_get_resource_value(char *buf,
unsigned long type,
resource_size_t *start,
resource_size_t *end,
unsigned long *flags)
{
if (start)
*start = 0;
if (end)
*end = 0;
if (flags)
*flags = 0;
/* TBD: allow for disabled resources */
buf = skip_spaces(buf);
if (start) {
*start = simple_strtoull(buf, &buf, 0);
if (end) {
buf = skip_spaces(buf);
if (*buf == '-') {
buf = skip_spaces(buf + 1);
*end = simple_strtoull(buf, &buf, 0);
} else
*end = *start;
}
}
/* TBD: allow for additional flags, e.g., IORESOURCE_WINDOW */
return buf;
}
static ssize_t pnp_set_current_resources(struct device *dmdev,
struct device_attribute *attr,
const char *ubuf, size_t count)
......@@ -305,7 +338,6 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
struct pnp_dev *dev = to_pnp_dev(dmdev);
char *buf = (void *)ubuf;
int retval = 0;
resource_size_t start, end;
if (dev->status & PNP_ATTACHED) {
retval = -EBUSY;
......@@ -349,6 +381,10 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
goto done;
}
if (!strnicmp(buf, "set", 3)) {
resource_size_t start;
resource_size_t end;
unsigned long flags;
if (dev->active)
goto done;
buf += 3;
......@@ -357,41 +393,36 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
while (1) {
buf = skip_spaces(buf);
if (!strnicmp(buf, "io", 2)) {
buf = skip_spaces(buf + 2);
start = simple_strtoul(buf, &buf, 0);
buf = skip_spaces(buf);
if (*buf == '-') {
buf = skip_spaces(buf + 1);
end = simple_strtoul(buf, &buf, 0);
buf = pnp_get_resource_value(buf + 2,
IORESOURCE_IO,
&start, &end,
&flags);
pnp_add_io_resource(dev, start, end, flags);
} else if (!strnicmp(buf, "mem", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_MEM,
&start, &end,
&flags);
pnp_add_mem_resource(dev, start, end, flags);
} else if (!strnicmp(buf, "irq", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_IRQ,
&start, NULL,
&flags);
pnp_add_irq_resource(dev, start, flags);
} else if (!strnicmp(buf, "dma", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_DMA,
&start, NULL,
&flags);
pnp_add_dma_resource(dev, start, flags);
} else if (!strnicmp(buf, "bus", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_BUS,
&start, &end,
NULL);
pnp_add_bus_resource(dev, start, end);
} else
end = start;
pnp_add_io_resource(dev, start, end, 0);
continue;
}
if (!strnicmp(buf, "mem", 3)) {
buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
buf = skip_spaces(buf);
if (*buf == '-') {
buf = skip_spaces(buf + 1);
end = simple_strtoul(buf, &buf, 0);
} else
end = start;
pnp_add_mem_resource(dev, start, end, 0);
continue;
}
if (!strnicmp(buf, "irq", 3)) {
buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
pnp_add_irq_resource(dev, start, 0);
continue;
}
if (!strnicmp(buf, "dma", 3)) {
buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
pnp_add_dma_resource(dev, start, 0);
continue;
}
break;
}
mutex_unlock(&pnp_res_mutex);
......
......@@ -18,11 +18,27 @@
DEFINE_MUTEX(pnp_res_mutex);
static struct resource *pnp_find_resource(struct pnp_dev *dev,
unsigned char rule,
unsigned long type,
unsigned int bar)
{
struct resource *res = pnp_get_resource(dev, type, bar);
/* when the resource already exists, set its resource bits from rule */
if (res) {
res->flags &= ~IORESOURCE_BITS;
res->flags |= rule & IORESOURCE_BITS;
}
return res;
}
static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
struct resource *res, local_res;
res = pnp_get_resource(dev, IORESOURCE_IO, idx);
res = pnp_find_resource(dev, rule->flags, IORESOURCE_IO, idx);
if (res) {
pnp_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
......@@ -65,7 +81,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
{
struct resource *res, local_res;
res = pnp_get_resource(dev, IORESOURCE_MEM, idx);
res = pnp_find_resource(dev, rule->flags, IORESOURCE_MEM, idx);
if (res) {
pnp_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
......@@ -78,6 +94,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
res->start = 0;
res->end = 0;
/* ??? rule->flags restricted to 8 bits, all tests bogus ??? */
if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
res->flags |= IORESOURCE_READONLY;
if (rule->flags & IORESOURCE_MEM_CACHEABLE)
......@@ -123,7 +140,7 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
};
res = pnp_get_resource(dev, IORESOURCE_IRQ, idx);
res = pnp_find_resource(dev, rule->flags, IORESOURCE_IRQ, idx);
if (res) {
pnp_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
......@@ -182,7 +199,7 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
1, 3, 5, 6, 7, 0, 2, 4
};
res = pnp_get_resource(dev, IORESOURCE_DMA, idx);
res = pnp_find_resource(dev, rule->flags, IORESOURCE_DMA, idx);
if (res) {
pnp_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
......