Commit 8644121c authored by Maulik Shah's avatar Maulik Shah Committed by Greg Kroah-Hartman

soc: qcom: rpmh: Invalidate SLEEP and WAKE TCSes before flushing new data

commit f5ac95f9 upstream.

TCSes have previously programmed data when rpmh_flush() is called.
This can cause a stale request to trigger along with the newly flushed data.

Fix this by cleaning SLEEP and WAKE TCSes before new data is flushed.

With this there is no need to invoke rpmh_rsc_invalidate() call from
rpmh_invalidate().

Simplify rpmh_invalidate() by moving invalidate_batch() inside.

Fixes: 600513df ("drivers: qcom: rpmh: cache sleep/wake state requests")
Signed-off-by: default avatarMaulik Shah <mkshah@codeaurora.org>
Reviewed-by: default avatarDouglas Anderson <dianders@chromium.org>
Reviewed-by: default avatarStephen Boyd <swboyd@chromium.org>
Link: https://lore.kernel.org/r/1586703004-13674-4-git-send-email-mkshah@codeaurora.org
Signed-off-by: default avatarBjorn Andersson <bjorn.andersson@linaro.org>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d5046990
...@@ -318,19 +318,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr) ...@@ -318,19 +318,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
return ret; return ret;
} }
/*
 * invalidate_batch - drop every cached batch request on a controller
 * @ctrlr: RPMh controller whose batch_cache is to be emptied
 *
 * Frees each entry on ctrlr->batch_cache, resets the list head, and marks
 * the controller dirty so the caches are flushed again later.  The whole
 * operation runs under cache_lock with interrupts disabled, since the
 * cache may be touched from atomic context.
 */
static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *entry, *next;
	unsigned long irq_flags;

	spin_lock_irqsave(&ctrlr->cache_lock, irq_flags);

	/* Release every cached batch; _safe variant allows kfree in-loop. */
	list_for_each_entry_safe(entry, next, &ctrlr->batch_cache, list)
		kfree(entry);
	INIT_LIST_HEAD(&ctrlr->batch_cache);

	/* Cached state no longer matches hardware; force a future flush. */
	ctrlr->dirty = true;

	spin_unlock_irqrestore(&ctrlr->cache_lock, irq_flags);
}
/** /**
* rpmh_write_batch: Write multiple sets of RPMH commands and wait for the * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
* batch to finish. * batch to finish.
...@@ -470,6 +457,13 @@ int rpmh_flush(const struct device *dev) ...@@ -470,6 +457,13 @@ int rpmh_flush(const struct device *dev)
return 0; return 0;
} }
/* Invalidate the TCSes first to avoid stale data */
do {
ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
} while (ret == -EAGAIN);
if (ret)
return ret;
/* First flush the cached batch requests */ /* First flush the cached batch requests */
ret = flush_batch(ctrlr); ret = flush_batch(ctrlr);
if (ret) if (ret)
...@@ -501,24 +495,25 @@ int rpmh_flush(const struct device *dev) ...@@ -501,24 +495,25 @@ int rpmh_flush(const struct device *dev)
EXPORT_SYMBOL(rpmh_flush); EXPORT_SYMBOL(rpmh_flush);
/** /**
* rpmh_invalidate: Invalidate all sleep and active sets * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
* sets.
* *
* @dev: The device making the request * @dev: The device making the request
* *
* Invalidate the sleep and active values in the TCS blocks. * Invalidate the sleep and wake values in batch_cache.
*/ */
int rpmh_invalidate(const struct device *dev) int rpmh_invalidate(const struct device *dev)
{ {
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
int ret; struct batch_cache_req *req, *tmp;
unsigned long flags;
invalidate_batch(ctrlr);
do { spin_lock_irqsave(&ctrlr->cache_lock, flags);
ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
} while (ret == -EAGAIN); kfree(req);
INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
return ret; return 0;
} }
EXPORT_SYMBOL(rpmh_invalidate); EXPORT_SYMBOL(rpmh_invalidate);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment