Commit ef5f5de0 authored by Rafael J. Wysocki

Merge branch 'acpi-pm'

* acpi-pm:
  ACPI / bus: Move duplicate code to a separate new function
  mfd: Add support for Intel Sunrisepoint LPSS devices
  dmaengine: add a driver for Intel integrated DMA 64-bit
  mfd: make mfd_remove_devices() iterate in reverse order
  driver core: implement device_for_each_child_reverse()
  klist: implement klist_prev()
  Driver core: wakeup the parent device before trying probe
  ACPI / PM: Attach ACPI power domain only once
  PM / QoS: Make it possible to expose device latency tolerance to userspace
  ACPI / PM: Update the copyright notice and description of power.c
parents 73990fc8 e91a398c
@@ -482,6 +482,43 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
Device Matching
-------------------------------------------------------------------------- */
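/*
 * Returns @adev if @dev is the first entry on @adev's list of physical
 * devices, or NULL otherwise.
 */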
static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
const struct device *dev)
{
struct mutex *physical_node_lock = &adev->physical_node_lock;
mutex_lock(physical_node_lock);
if (list_empty(&adev->physical_node_list)) {
adev = NULL;
} else {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
if (node->dev != dev)
adev = NULL;
}
mutex_unlock(physical_node_lock);
return adev;
}
/**
* acpi_device_is_first_physical_node - Is given @dev the first physical node
* @adev: ACPI companion device
* @dev: Physical device to check
*
* Checks whether the given @dev is the first physical device attached to
* the ACPI companion device. This distinction is needed in some cases
* where the same companion device is shared between multiple physical
* devices.
*
* Note that the caller has to provide a valid @adev pointer.
*/
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev)
{
return !!acpi_primary_dev_companion(adev, dev);
}
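
An illustrative sketch of the case this helper disambiguates: several physical
devices may share one ACPI companion, and only the first entry on the
companion's physical_node_list should own shared setup. The probe function and
message below are hypothetical, and the helper is ACPI-internal (declared in
internal.h), so this is an illustration rather than a driver-facing API:

static int example_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	/* Secondary physical nodes leave shared setup to the primary one. */
	if (adev && !acpi_device_is_first_physical_node(adev, &pdev->dev)) {
		dev_dbg(&pdev->dev, "shared companion, primary owns setup\n");
		return 0;
	}
	/* ... first physical node: perform the shared setup here ... */
	return 0;
}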
/*
* acpi_companion_match() - Can we match via ACPI companion device
* @dev: Device in question
@@ -506,7 +543,6 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
struct acpi_device *acpi_companion_match(const struct device *dev)
{
struct acpi_device *adev;
struct mutex *physical_node_lock;
adev = ACPI_COMPANION(dev);
if (!adev)
@@ -515,21 +551,7 @@ struct acpi_device *acpi_companion_match(const struct device *dev)
if (list_empty(&adev->pnp.ids))
return NULL;
physical_node_lock = &adev->physical_node_lock;
mutex_lock(physical_node_lock);
if (list_empty(&adev->physical_node_list)) {
adev = NULL;
} else {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
if (node->dev != dev)
adev = NULL;
}
mutex_unlock(physical_node_lock);
return adev;
return acpi_primary_dev_companion(adev, dev);
}
/**
......
@@ -1119,6 +1119,14 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
if (dev->pm_domain)
return -EEXIST;
/*
* Only attach the power domain to the first physical device if the
* companion is shared by multiple devices. This prevents power
* management from being carried out twice for the same hardware.
*/
if (!acpi_device_is_first_physical_node(adev, dev))
return -EBUSY;
acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
dev->pm_domain = &acpi_general_pm_domain;
if (power_on) {
......
@@ -96,6 +96,8 @@ void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
bool acpi_device_is_present(struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
/* --------------------------------------------------------------------------
Device Matching and Notification
......
/*
* acpi_power.c - ACPI Bus Power Management ($Revision: 39 $)
* drivers/acpi/power.c - ACPI Power Resources management.
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 - 2015 Intel Corp.
* Author: Andy Grover <andrew.grover@intel.com>
* Author: Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -23,10 +25,11 @@
* ACPI power-managed devices may be controlled in two ways:
* 1. via "Device Specific (D-State) Control"
* 2. via "Power Resource Control".
* This module is used to manage devices relying on Power Resource Control.
* The code below deals with ACPI Power Resources control.
*
* An ACPI "power resource object" represents a software controllable power
* plane, clock plane, or other resource depended on by a device.
*
* An ACPI "power resource object" describes a software controllable power
* plane, clock plane, or other resource used by a power managed device.
* A device may rely on multiple power resources, and a power resource
* may be shared by multiple devices.
*/
......
@@ -115,7 +115,6 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
return 0;
}
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
{
struct acpi_device_physical_node *pn;
......
@@ -1252,6 +1252,19 @@ void device_unregister(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_unregister);
static struct device *prev_device(struct klist_iter *i)
{
struct klist_node *n = klist_prev(i);
struct device *dev = NULL;
struct device_private *p;
if (n) {
p = to_device_private_parent(n);
dev = p->device;
}
return dev;
}
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -1340,6 +1353,36 @@ int device_for_each_child(struct device *parent, void *data,
}
EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_for_each_child_reverse - device child iterator in reverse order.
* @parent: parent struct device.
* @fn: function to be called for each device.
* @data: data for the callback.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
int (*fn)(struct device *dev, void *data))
{
struct klist_iter i;
struct device *child;
int error = 0;
if (!parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
while ((child = prev_device(&i)) && !error)
error = fn(child, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
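
A minimal usage sketch (the callback name is hypothetical;
mfd_remove_devices() further below becomes the first in-tree user): children
are visited tail first, so teardown runs in the opposite order of creation.

static int example_unregister_child(struct device *dev, void *data)
{
	/* Assumes the children are platform devices. */
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

...
device_for_each_child_reverse(parent, NULL, example_unregister_child);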
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
......
@@ -399,6 +399,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
*
* This function must be called with @dev lock held. When called for a
* USB interface, @dev->parent lock must be held as well.
*
* If the device has a parent, runtime-resume the parent before driver probing.
*/
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
@@ -410,10 +412,16 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
pm_runtime_barrier(dev);
ret = really_probe(dev, drv);
pm_request_idle(dev);
if (dev->parent)
pm_runtime_put(dev->parent);
return ret;
}
@@ -507,11 +515,17 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
device_lock(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
dev_dbg(dev, "async probe completed\n");
pm_request_idle(dev);
if (dev->parent)
pm_runtime_put(dev->parent);
device_unlock(dev);
put_device(dev);
@@ -541,6 +555,9 @@ static int __device_attach(struct device *dev, bool allow_async)
.want_async = false,
};
if (dev->parent)
pm_runtime_get_sync(dev->parent);
ret = bus_for_each_drv(dev->bus, NULL, &data,
__device_attach_driver);
if (!ret && allow_async && data.have_async) {
@@ -557,6 +574,9 @@ static int __device_attach(struct device *dev, bool allow_async)
} else {
pm_request_idle(dev);
}
if (dev->parent)
pm_runtime_put(dev->parent);
}
out_unlock:
device_unlock(dev);
......
@@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
#else /* CONFIG_PM */
......
@@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
/**
* dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
* @dev: Device whose latency tolerance to expose
*/
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
int ret;
if (!dev->power.set_latency_tolerance)
return -EINVAL;
mutex_lock(&dev_pm_qos_sysfs_mtx);
ret = pm_qos_sysfs_add_latency_tolerance(dev);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
/**
* dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
* @dev: Device whose latency tolerance to hide
*/
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
pm_qos_sysfs_remove_latency_tolerance(dev);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
/* Remove the request from user space now */
pm_runtime_get_sync(dev);
dev_pm_qos_update_user_latency_tolerance(dev,
PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
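
A minimal sketch of the intended driver-side flow, with hypothetical names
(the real consumer in this series is intel-lpss.c, whose diff is collapsed
further below): the driver must install a set_latency_tolerance() callback
first, since dev_pm_qos_expose_latency_tolerance() returns -EINVAL without
one.

static void example_set_latency_tolerance(struct device *dev, s32 val)
{
	/* Program the device's latency/LTR configuration from @val. */
}

static int example_probe(struct device *dev)
{
	dev->power.set_latency_tolerance = example_set_latency_tolerance;
	return dev_pm_qos_expose_latency_tolerance(dev);
}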
@@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
return sysfs_merge_group(&dev->kobj,
&pm_qos_latency_tolerance_attr_group);
}
void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
......
@@ -85,6 +85,14 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.
config IDMA64
tristate "Intel integrated DMA 64-bit support"
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Enable DMA support for the Intel Low Power Subsystem, such as that
found on the Intel Skylake PCH.
source "drivers/dma/dw/Kconfig"
config AT_HDMAC
......
@@ -14,6 +14,7 @@ obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_IDMA64) += idma64.o
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
......
This diff is collapsed (drivers/dma/idma64.c).
/*
* Driver for the Intel integrated DMA 64-bit
*
* Copyright (C) 2015 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __DMA_IDMA64_H__
#define __DMA_IDMA64_H__
#include <linux/device.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "virt-dma.h"
/* Channel registers */
#define IDMA64_CH_SAR 0x00 /* Source Address Register */
#define IDMA64_CH_DAR 0x08 /* Destination Address Register */
#define IDMA64_CH_LLP 0x10 /* Linked List Pointer */
#define IDMA64_CH_CTL_LO 0x18 /* Control Register Low */
#define IDMA64_CH_CTL_HI 0x1c /* Control Register High */
#define IDMA64_CH_SSTAT 0x20
#define IDMA64_CH_DSTAT 0x28
#define IDMA64_CH_SSTATAR 0x30
#define IDMA64_CH_DSTATAR 0x38
#define IDMA64_CH_CFG_LO 0x40 /* Configuration Register Low */
#define IDMA64_CH_CFG_HI 0x44 /* Configuration Register High */
#define IDMA64_CH_SGR 0x48
#define IDMA64_CH_DSR 0x50
#define IDMA64_CH_LENGTH 0x58
/* Bitfields in CTL_LO */
#define IDMA64C_CTLL_INT_EN (1 << 0) /* irqs enabled? */
#define IDMA64C_CTLL_DST_WIDTH(x) ((x) << 1) /* bytes per element */
#define IDMA64C_CTLL_SRC_WIDTH(x) ((x) << 4)
#define IDMA64C_CTLL_DST_INC (0 << 8) /* DAR update/not */
#define IDMA64C_CTLL_DST_FIX (1 << 8)
#define IDMA64C_CTLL_SRC_INC (0 << 10) /* SAR update/not */
#define IDMA64C_CTLL_SRC_FIX (1 << 10)
#define IDMA64C_CTLL_DST_MSIZE(x) ((x) << 11) /* burst, #elements */
#define IDMA64C_CTLL_SRC_MSIZE(x) ((x) << 14)
#define IDMA64C_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
#define IDMA64C_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
#define IDMA64C_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
#define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */
/* Bitfields in CTL_HI */
#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1))
#define IDMA64C_CTLH_DONE (1 << 17)
/* Bitfields in CFG_LO */
#define IDMA64C_CFGL_DST_BURST_ALIGN (1 << 0) /* dst burst align */
#define IDMA64C_CFGL_SRC_BURST_ALIGN (1 << 1) /* src burst align */
#define IDMA64C_CFGL_CH_SUSP (1 << 8)
#define IDMA64C_CFGL_FIFO_EMPTY (1 << 9)
#define IDMA64C_CFGL_CH_DRAIN (1 << 10) /* drain FIFO */
#define IDMA64C_CFGL_DST_OPT_BL (1 << 20) /* optimize dst burst length */
#define IDMA64C_CFGL_SRC_OPT_BL (1 << 21) /* optimize src burst length */
/* Bitfields in CFG_HI */
#define IDMA64C_CFGH_SRC_PER(x) ((x) << 0) /* src peripheral */
#define IDMA64C_CFGH_DST_PER(x) ((x) << 4) /* dst peripheral */
#define IDMA64C_CFGH_RD_ISSUE_THD(x) ((x) << 8)
#define IDMA64C_CFGH_RW_ISSUE_THD(x) ((x) << 18)
/* Interrupt registers */
#define IDMA64_INT_XFER 0x00
#define IDMA64_INT_BLOCK 0x08
#define IDMA64_INT_SRC_TRAN 0x10
#define IDMA64_INT_DST_TRAN 0x18
#define IDMA64_INT_ERROR 0x20
#define IDMA64_RAW(x) (0x2c0 + IDMA64_INT_##x) /* r */
#define IDMA64_STATUS(x) (0x2e8 + IDMA64_INT_##x) /* r (raw & mask) */
#define IDMA64_MASK(x) (0x310 + IDMA64_INT_##x) /* rw (set = irq enabled) */
#define IDMA64_CLEAR(x) (0x338 + IDMA64_INT_##x) /* w (ack, affects "raw") */
/* Common registers */
#define IDMA64_STATUS_INT 0x360 /* r */
#define IDMA64_CFG 0x398
#define IDMA64_CH_EN 0x3a0
/* Bitfields in CFG */
#define IDMA64_CFG_DMA_EN (1 << 0)
/* Hardware descriptor for Linked List transfers */
struct idma64_lli {
u64 sar;
u64 dar;
u64 llp;
u32 ctllo;
u32 ctlhi;
u32 sstat;
u32 dstat;
};
struct idma64_hw_desc {
struct idma64_lli *lli;
dma_addr_t llp;
dma_addr_t phys;
unsigned int len;
};
struct idma64_desc {
struct virt_dma_desc vdesc;
enum dma_transfer_direction direction;
struct idma64_hw_desc *hw;
unsigned int ndesc;
size_t length;
enum dma_status status;
};
static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc)
{
return container_of(vdesc, struct idma64_desc, vdesc);
}
struct idma64_chan {
struct virt_dma_chan vchan;
void __iomem *regs;
/* hardware configuration */
enum dma_transfer_direction direction;
unsigned int mask;
struct dma_slave_config config;
void *pool;
struct idma64_desc *desc;
};
static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan)
{
return container_of(chan, struct idma64_chan, vchan.chan);
}
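/*
 * Assumed per the DesignWare DMA convention this IP follows: bits 15:8 of
 * registers such as CH_EN act as a write-enable mask, so a single write of
 * ((mask) << 8) | (mask) sets the channel bits in mask, and
 * ((mask) << 8) | 0 clears them, with no read-modify-write cycle.
 */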
#define channel_set_bit(idma64, reg, mask) \
dma_writel(idma64, reg, ((mask) << 8) | (mask))
#define channel_clear_bit(idma64, reg, mask) \
dma_writel(idma64, reg, ((mask) << 8) | 0)
static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset)
{
return readl(idma64c->regs + offset);
}
static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
u32 value)
{
writel(value, idma64c->regs + offset);
}
#define channel_readl(idma64c, reg) \
idma64c_readl(idma64c, IDMA64_CH_##reg)
#define channel_writel(idma64c, reg, value) \
idma64c_writel(idma64c, IDMA64_CH_##reg, (value))
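/*
 * The 64-bit channel registers are accessed as two 32-bit MMIO operations,
 * low dword first; the helpers below compose and split the halves.
 */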
static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
{
u64 l, h;
l = idma64c_readl(idma64c, offset);
h = idma64c_readl(idma64c, offset + 4);
return l | (h << 32);
}
static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
u64 value)
{
idma64c_writel(idma64c, offset, value);
idma64c_writel(idma64c, offset + 4, value >> 32);
}
#define channel_readq(idma64c, reg) \
idma64c_readq(idma64c, IDMA64_CH_##reg)
#define channel_writeq(idma64c, reg, value) \
idma64c_writeq(idma64c, IDMA64_CH_##reg, (value))
struct idma64 {
struct dma_device dma;
void __iomem *regs;
/* channels */
unsigned short all_chan_mask;
struct idma64_chan *chan;
};
static inline struct idma64 *to_idma64(struct dma_device *ddev)
{
return container_of(ddev, struct idma64, dma);
}
static inline u32 idma64_readl(struct idma64 *idma64, int offset)
{
return readl(idma64->regs + offset);
}
static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
{
writel(value, idma64->regs + offset);
}
#define dma_readl(idma64, reg) \
idma64_readl(idma64, IDMA64_##reg)
#define dma_writel(idma64, reg, value) \
idma64_writel(idma64, IDMA64_##reg, (value))
/**
* struct idma64_chip - representation of iDMA 64-bit controller hardware
* @dev: struct device of the DMA controller
* @irq: irq line
* @regs: memory mapped I/O space
* @idma64: struct idma64 that is filled by idma64_probe()
*/
struct idma64_chip {
struct device *dev;
int irq;
void __iomem *regs;
struct idma64 *idma64;
};
#endif /* __DMA_IDMA64_H__ */
@@ -328,6 +328,29 @@ config INTEL_SOC_PMIC
thermal, charger and related power management functions
on these systems.
config MFD_INTEL_LPSS
tristate
select COMMON_CLK
select MFD_CORE
config MFD_INTEL_LPSS_ACPI
tristate "Intel Low Power Subsystem support in ACPI mode"
select MFD_INTEL_LPSS
depends on X86 && ACPI
help
This driver supports Intel Low Power Subsystem (LPSS) devices such as
I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
PCH) in ACPI mode.
config MFD_INTEL_LPSS_PCI
tristate "Intel Low Power Subsystem support in PCI mode"
select MFD_INTEL_LPSS
depends on X86 && PCI
help
This driver supports Intel Low Power Subsystem (LPSS) devices such as
I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
PCH) in PCI mode.
config MFD_INTEL_MSIC
bool "Intel MSIC"
depends on INTEL_SCU_IPC
......
@@ -161,6 +161,9 @@ obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o
obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
......
/*
* Intel LPSS ACPI support.
*
* Copyright (C) 2015, Intel Corporation
*
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include "intel-lpss.h"
static const struct intel_lpss_platform_info spt_info = {
.clk_rate = 120000000,
};
static const struct acpi_device_id intel_lpss_acpi_ids[] = {
/* SPT */
{ "INT3446", (kernel_ulong_t)&spt_info },
{ "INT3447", (kernel_ulong_t)&spt_info },
{ }
};
MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids);
static int intel_lpss_acpi_probe(struct platform_device *pdev)
{
struct intel_lpss_platform_info *info;
const struct acpi_device_id *id;
id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
if (!id)
return -ENODEV;
info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
info->irq = platform_get_irq(pdev, 0);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
return intel_lpss_probe(&pdev->dev, info);
}
static int intel_lpss_acpi_remove(struct platform_device *pdev)
{
intel_lpss_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops);
static struct platform_driver intel_lpss_acpi_driver = {
.probe = intel_lpss_acpi_probe,
.remove = intel_lpss_acpi_remove,
.driver = {
.name = "intel-lpss",
.acpi_match_table = intel_lpss_acpi_ids,
.pm = &intel_lpss_acpi_pm_ops,
},
};
module_platform_driver(intel_lpss_acpi_driver);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS ACPI driver");
MODULE_LICENSE("GPL v2");
/*
* Intel LPSS PCI support.
*
* Copyright (C) 2015, Intel Corporation
*
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "intel-lpss.h"
static int intel_lpss_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_lpss_platform_info *info;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->mem = &pdev->resource[0];
info->irq = pdev->irq;
/* It is probably enough to set this for iDMA-capable devices only */
pci_set_master(pdev);
ret = intel_lpss_probe(&pdev->dev, info);
if (ret)
return ret;
pm_runtime_put(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
}
static void intel_lpss_pci_remove(struct pci_dev *pdev)
{
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
intel_lpss_remove(&pdev->dev);
}
static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
static const struct intel_lpss_platform_info spt_info = {
.clk_rate = 120000000,
};
static const struct intel_lpss_platform_info spt_uart_info = {
.clk_rate = 120000000,
.clk_con_id = "baudclk",
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* SPT-LP */
{ PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info },
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
static struct pci_driver intel_lpss_pci_driver = {
.name = "intel-lpss",
.id_table = intel_lpss_pci_ids,
.probe = intel_lpss_pci_probe,
.remove = intel_lpss_pci_remove,
.driver = {
.pm = &intel_lpss_pci_pm_ops,
},
};
module_pci_driver(intel_lpss_pci_driver);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS PCI driver");
MODULE_LICENSE("GPL v2");
This diff is collapsed (drivers/mfd/intel-lpss.c).
/*
* Intel LPSS core support.
*
* Copyright (C) 2015, Intel Corporation
*
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __MFD_INTEL_LPSS_H
#define __MFD_INTEL_LPSS_H
struct device;
struct resource;
struct intel_lpss_platform_info {
struct resource *mem;
int irq;
unsigned long clk_rate;
const char *clk_con_id;
};
int intel_lpss_probe(struct device *dev,
const struct intel_lpss_platform_info *info);
void intel_lpss_remove(struct device *dev);
#ifdef CONFIG_PM
int intel_lpss_prepare(struct device *dev);
int intel_lpss_suspend(struct device *dev);
int intel_lpss_resume(struct device *dev);
#ifdef CONFIG_PM_SLEEP
#define INTEL_LPSS_SLEEP_PM_OPS \
.prepare = intel_lpss_prepare, \
.suspend = intel_lpss_suspend, \
.resume = intel_lpss_resume, \
.freeze = intel_lpss_suspend, \
.thaw = intel_lpss_resume, \
.poweroff = intel_lpss_suspend, \
.restore = intel_lpss_resume,
#endif
#define INTEL_LPSS_RUNTIME_PM_OPS \
.runtime_suspend = intel_lpss_suspend, \
.runtime_resume = intel_lpss_resume,
#else /* !CONFIG_PM */
#define INTEL_LPSS_SLEEP_PM_OPS
#define INTEL_LPSS_RUNTIME_PM_OPS
#endif /* CONFIG_PM */
#define INTEL_LPSS_PM_OPS(name) \
const struct dev_pm_ops name = { \
INTEL_LPSS_SLEEP_PM_OPS \
INTEL_LPSS_RUNTIME_PM_OPS \
}
#endif /* __MFD_INTEL_LPSS_H */
@@ -302,7 +302,7 @@ void mfd_remove_devices(struct device *parent)
{
atomic_t *cnts = NULL;
device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);
......
@@ -959,6 +959,8 @@ extern int __must_check device_add(struct device *dev);
extern void device_del(struct device *dev);
extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
extern int device_for_each_child_reverse(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, const char *new_name);
......
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n);
extern void klist_iter_exit(struct klist_iter *i);
extern struct klist_node *klist_prev(struct klist_iter *i);
extern struct klist_node *klist_next(struct klist_iter *i);
#endif
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{ return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
......
@@ -323,6 +323,47 @@ static struct klist_node *to_klist_node(struct list_head *n)
return container_of(n, struct klist_node, n_node);
}
/**
* klist_prev - Ante up prev node in list.
* @i: Iterator structure.
*
* First grab list lock. Decrement the reference count of the previous
* node, if there was one. Grab the prev node, increment its reference
* count, drop the lock, and return that prev node.
*/
struct klist_node *klist_prev(struct klist_iter *i)
{
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
spin_lock(&i->i_klist->k_lock);
if (last) {
prev = to_klist_node(last->n_node.prev);
if (!klist_dec_and_del(last))
put = NULL;
} else
prev = to_klist_node(i->i_klist->k_list.prev);
i->i_cur = NULL;
while (prev != to_klist_node(&i->i_klist->k_list)) {
if (likely(!knode_dead(prev))) {
kref_get(&prev->n_ref);
i->i_cur = prev;
break;
}
prev = to_klist_node(prev->n_node.prev);
}
spin_unlock(&i->i_klist->k_lock);
if (put && last)
put(last);
return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_prev);
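
A minimal sketch of backwards iteration with the new primitive (hypothetical
klist and handler; prev_device() in the driver core above is the in-tree
analogue). Because klist_iter_init() leaves i_cur at NULL, the first
klist_prev() call returns the tail node:

struct klist_iter i;
struct klist_node *n;

klist_iter_init(&example_klist, &i);
while ((n = klist_prev(&i)))
	example_handle(n);	/* visits nodes from tail to head */
klist_iter_exit(&i);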
/**
* klist_next - Ante up next node in list.
* @i: Iterator structure.
......