Commit 985427f9 authored by Maulik Shah, committed by Bjorn Andersson

soc: qcom: rpmh: Invoke rpmh_flush() for dirty caches

Add changes to invoke rpmh_flush() from the CPU PM notification.
This is done when the last CPU is entering deep CPU idle states
and the controller is not busy.

Controllers that have 'HW solver' mode, like the display RSC, do not
need to register for CPU PM notification. They may be in autonomous
mode executing low power modes and do not require rpmh_flush() to
happen from CPU PM notification.
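
To make the mechanism concrete, here is a minimal sketch of the CPU PM notifier pattern described above. It is not the driver's code: the names my_drv, my_pm_callback and my_flush_sleep_wake_sets are hypothetical, and the real callback additionally serializes concurrent notifications with a spinlock and checks that the controller's AMCs are idle before flushing.

/* Minimal sketch (hypothetical names) of flushing from the last CPU
 * entering idle: track CPUs in a cpumask and act only when the mask
 * covers every online CPU; returning NOTIFY_BAD vetoes the low power mode.
 */
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/smp.h>

struct my_drv {
	struct notifier_block pm_nb;	/* registered via cpu_pm_register_notifier() */
	struct cpumask cpus_in_pm;	/* CPUs currently in idle power collapse */
};

/* Stub standing in for writing the cached SLEEP/WAKE sets to hardware */
static int my_flush_sleep_wake_sets(struct my_drv *drv)
{
	return 0;
}

static int my_pm_callback(struct notifier_block *nfb, unsigned long action,
			  void *v)
{
	struct my_drv *drv = container_of(nfb, struct my_drv, pm_nb);

	switch (action) {
	case CPU_PM_ENTER:
		cpumask_set_cpu(smp_processor_id(), &drv->cpus_in_pm);
		/* Not the last CPU yet, nothing to flush */
		if (!cpumask_equal(&drv->cpus_in_pm, cpu_online_mask))
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		cpumask_clear_cpu(smp_processor_id(), &drv->cpus_in_pm);
		return NOTIFY_OK;
	}

	/* Last CPU going down: flush now, or deny the low power mode */
	return my_flush_sleep_wake_sets(drv) ? NOTIFY_BAD : NOTIFY_OK;
}

Registration would then set pm_nb.notifier_call = my_pm_callback and call cpu_pm_register_notifier(&drv->pm_nb), which is what the patch below does in probe when no HW solver mode is present.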
Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Stephen Boyd <swboyd@chromium.org>
Link: https://lore.kernel.org/r/1586703004-13674-5-git-send-email-mkshah@codeaurora.org
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
parent f5ac95f9
@@ -84,23 +84,32 @@ struct rpmh_ctrlr {
  * struct rsc_drv: the Direct Resource Voter (DRV) of the
  * Resource State Coordinator controller (RSC)
  *
- * @name:       controller identifier
- * @tcs_base:   start address of the TCS registers in this controller
- * @id:         instance id in the controller (Direct Resource Voter)
- * @num_tcs:    number of TCSes in this DRV
- * @tcs:        TCS groups
- * @tcs_in_use: s/w state of the TCS
- * @lock:       synchronize state of the controller
- * @client:     handle to the DRV's client.
+ * @name:            Controller identifier
+ * @tcs_base:        Start address of the TCS registers in this controller
+ * @id:              Instance id in the controller (Direct Resource Voter)
+ * @num_tcs:         Number of TCSes in this DRV
+ * @rsc_pm:          CPU PM notifier for controller
+ *                   Used when solver mode is not present
+ * @cpus_entered_pm: CPU mask for cpus in idle power collapse
+ *                   Used when solver mode is not present
+ * @tcs:             TCS groups
+ * @tcs_in_use:      S/W state of the TCS
+ * @lock:            Synchronize state of the controller
+ * @pm_lock:         Synchronize during PM notifications
+ *                   Used when solver mode is not present
+ * @client:          Handle to the DRV's client.
  */
 struct rsc_drv {
 	const char *name;
 	void __iomem *tcs_base;
 	int id;
 	int num_tcs;
+	struct notifier_block rsc_pm;
+	struct cpumask cpus_entered_pm;
 	struct tcs_group tcs[TCS_TYPE_NR];
 	DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
 	spinlock_t lock;
+	spinlock_t pm_lock;
 	struct rpmh_ctrlr client;
 };
@@ -6,6 +6,7 @@
 #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

 #include <linux/atomic.h>
+#include <linux/cpu_pm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -30,7 +31,12 @@
 #define RSC_DRV_TCS_OFFSET		672
 #define RSC_DRV_CMD_OFFSET		20

-/* DRV Configuration Information Register */
+/* DRV HW Solver Configuration Information Register */
+#define DRV_SOLVER_CONFIG		0x04
+#define DRV_HW_SOLVER_MASK		1
+#define DRV_HW_SOLVER_SHIFT		24
+
+/* DRV TCS Configuration Information Register */
 #define DRV_PRNT_CHLD_CONFIG		0x0C
 #define DRV_NUM_TCS_MASK		0x3F
 #define DRV_NUM_TCS_SHIFT		6
@@ -521,8 +527,85 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
 	return tcs_ctrl_write(drv, msg);
 }

+/**
+ * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
+ *
+ * @drv: The controller
+ *
+ * Checks if any of the AMCs are busy in handling ACTIVE sets.
+ * This is called from the last cpu powering down before flushing
+ * SLEEP and WAKE sets. If AMCs are busy, controller can not enter
+ * power collapse, so deny from the last cpu's pm notification.
+ *
+ * Return:
+ * * False	- AMCs are idle
+ * * True	- AMCs are busy
+ */
+static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
+{
+	int m;
+	struct tcs_group *tcs = get_tcs_of_type(drv, ACTIVE_TCS);
+
+	/*
+	 * If we made an active request on a RSC that does not have a
+	 * dedicated TCS for active state use, then re-purposed wake TCSes
+	 * should be checked for not busy, because we used wake TCSes for
+	 * active requests in this case.
+	 *
+	 * Since this is called from the last cpu, need not take drv or tcs
+	 * lock before checking tcs_is_free().
+	 */
+	if (!tcs->num_tcs)
+		tcs = get_tcs_of_type(drv, WAKE_TCS);
+
+	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+		if (!tcs_is_free(drv, m))
+			return true;
+	}
+
+	return false;
+}
+
+static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
+				    unsigned long action, void *v)
+{
+	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
+	int ret = NOTIFY_OK;
+
+	spin_lock(&drv->pm_lock);
+
+	switch (action) {
+	case CPU_PM_ENTER:
+		cpumask_set_cpu(smp_processor_id(), &drv->cpus_entered_pm);
+
+		if (!cpumask_equal(&drv->cpus_entered_pm, cpu_online_mask))
+			goto exit;
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		cpumask_clear_cpu(smp_processor_id(), &drv->cpus_entered_pm);
+		goto exit;
+	}
+
+	ret = rpmh_rsc_ctrlr_is_busy(drv);
+	if (ret) {
+		ret = NOTIFY_BAD;
+		goto exit;
+	}
+
+	ret = rpmh_flush(&drv->client);
+	if (ret)
+		ret = NOTIFY_BAD;
+	else
+		ret = NOTIFY_OK;
+
+exit:
+	spin_unlock(&drv->pm_lock);
+	return ret;
+}
+
 static int rpmh_probe_tcs_config(struct platform_device *pdev,
-				 struct rsc_drv *drv)
+				 struct rsc_drv *drv, void __iomem *base)
 {
 	struct tcs_type_config {
 		u32 type;
@@ -532,15 +615,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
 	u32 config, max_tcs, ncpt, offset;
 	int i, ret, n, st = 0;
 	struct tcs_group *tcs;
-	struct resource *res;
-	void __iomem *base;
-	char drv_id[10] = {0};
-
-	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
-	base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);

 	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
 	if (ret)
@@ -620,7 +694,11 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
 {
 	struct device_node *dn = pdev->dev.of_node;
 	struct rsc_drv *drv;
+	struct resource *res;
+	char drv_id[10] = {0};
 	int ret, irq;
+	u32 solver_config;
+	void __iomem *base;

 	/*
 	 * Even though RPMh doesn't directly use cmd-db, all of its children
@@ -646,7 +724,13 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
 	if (!drv->name)
 		drv->name = dev_name(&pdev->dev);

-	ret = rpmh_probe_tcs_config(pdev, drv);
+	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	ret = rpmh_probe_tcs_config(pdev, drv, base);
 	if (ret)
 		return ret;
@@ -663,6 +747,20 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;

+	/*
+	 * CPU PM notification are not required for controllers that support
+	 * 'HW solver' mode where they can be in autonomous mode executing low
+	 * power mode to power down.
+	 */
+	solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
+	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
+	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
+	if (!solver_config) {
+		drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
+		spin_lock_init(&drv->pm_lock);
+		cpu_pm_register_notifier(&drv->rsc_pm);
+	}
+
 	/* Enable the active TCS to send requests immediately */
 	write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
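As a side note on the solver_config decode in the probe hunk above: the mask-and-shift there is equivalent to (value >> DRV_HW_SOLVER_SHIFT) & DRV_HW_SOLVER_MASK. A standalone sketch of that decode, with a made-up register value purely for illustration:

/* Worked example of the DRV_SOLVER_CONFIG decode. The sample value is
 * invented for illustration: bit 24 set means the DRV runs in HW solver
 * mode, so the driver skips CPU PM notifier registration.
 */
#include <stdint.h>
#include <stdio.h>

#define DRV_HW_SOLVER_MASK	1
#define DRV_HW_SOLVER_SHIFT	24

int main(void)
{
	uint32_t solver_config = 0x01000000;	/* sample readout, not real HW data */
	uint32_t hw_solver = (solver_config >> DRV_HW_SOLVER_SHIFT) &
			     DRV_HW_SOLVER_MASK;

	printf("HW solver mode: %s -> %s\n",
	       hw_solver ? "yes" : "no",
	       hw_solver ? "skip CPU PM notifier" : "register CPU PM notifier");
	return 0;
}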
@@ -9,6 +9,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -297,12 +298,10 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
 {
 	struct batch_cache_req *req;
 	const struct rpmh_request *rpm_msg;
-	unsigned long flags;
 	int ret = 0;
 	int i;

 	/* Send Sleep/Wake requests to the controller, expect no response */
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
 	list_for_each_entry(req, &ctrlr->batch_cache, list) {
 		for (i = 0; i < req->count; i++) {
 			rpm_msg = req->rpm_msgs + i;
@@ -312,7 +311,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
 				break;
 		}
 	}
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

 	return ret;
 }
@@ -433,31 +431,32 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
 }

 /**
- * rpmh_flush: Flushes the buffered active and sleep sets to TCS
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
  *
- * @ctrlr: controller making request to flush cached data
+ * @ctrlr: Controller making request to flush cached data
  *
- * Return: -EBUSY if the controller is busy, probably waiting on a response
- * to a RPMH request sent earlier.
+ * This function is called from sleep code on the last CPU
+ * (thus no spinlock needed).
  *
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
+ * Return:
+ * * 0          - Success
+ * * -EAGAIN    - Retry again
+ * * Error code - Otherwise
  */
 int rpmh_flush(struct rpmh_ctrlr *ctrlr)
 {
 	struct cache_req *p;
 	int ret;

+	lockdep_assert_irqs_disabled();
+
 	if (!ctrlr->dirty) {
 		pr_debug("Skipping flush, TCS has latest data.\n");
 		return 0;
 	}

 	/* Invalidate the TCSes first to avoid stale data */
-	do {
-		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
-	} while (ret == -EAGAIN);
+	ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
 	if (ret)
 		return ret;
@@ -466,10 +465,6 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
 	if (ret)
 		return ret;

-	/*
-	 * Nobody else should be calling this function other than system PM,
-	 * hence we can run without locks.
-	 */
 	list_for_each_entry(p, &ctrlr->cache, list) {
 		if (!is_req_valid(p)) {
 			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",