Commit f8e87554 authored by Olof Johansson

Merge tag 'qcom-drivers-for-5.10' of https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux into arm/drivers

Qualcomm driver updates for v5.10

Replace the busy wait for free tcs slots in the RPMh driver with a
sleeping wait and use memory barriers when writing the command registers.

Add a bunch of SoC ids to the socinfo driver, fix an error print in the
apr driver and migrate llcc to devm_platform_ioremap_resource_byname().

* tag 'qcom-drivers-for-5.10' of https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux:
  soc: qcom: llcc: use devm_platform_ioremap_resource_byname()
  soc: qcom: apr: Fixup the error displayed on lookup failure
  soc: qcom: socinfo: Add msm8992/4 and apq8094 SoC IDs
  soc: qcom: rpmh-rsc: Sleep waiting for tcs slots to be free
  soc: qcom-geni-se: Don't use relaxed writes when writing commands
  soc: qcom: socinfo: add SC7180 entry to soc_id array
  soc: qcom: socinfo: add soc id for IPQ6018

Link: https://lore.kernel.org/r/20200924040504.179708-1-bjorn.andersson@linaro.org
Signed-off-by: Olof Johansson <olof@lixom.net>
parents c78c6e18 28993472
@@ -328,7 +328,7 @@ static int of_apr_add_pd_lookups(struct device *dev)
 		pds = pdr_add_lookup(apr->pdr, service_name, service_path);
 		if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
-			dev_err(dev, "pdr add lookup failed: %d\n", ret);
+			dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
 			return PTR_ERR(pds);
 		}
 	}
 
...
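
The apr fix above stops logging a stale `ret` (an int) and prints the pointer-encoded error instead. A minimal sketch of the idiom, using a hypothetical helper (demo_check); PTR_ERR() returns a long, hence the %ld format:

    #include <linux/err.h>
    #include <linux/device.h>

    /* Hypothetical helper: log and propagate a pointer-encoded error. */
    static int demo_check(struct device *dev, void *obj)
    {
            if (IS_ERR(obj) && PTR_ERR(obj) != -EALREADY) {
                    /* PTR_ERR() yields a long, so the format is %ld. */
                    dev_err(dev, "lookup failed: %ld\n", PTR_ERR(obj));
                    return PTR_ERR(obj);
            }
            return 0;
    }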
@@ -387,7 +387,6 @@ static int qcom_llcc_remove(struct platform_device *pdev)
 static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
 					  const char *name)
 {
-	struct resource *res;
 	void __iomem *base;
 	struct regmap_config llcc_regmap_config = {
 		.reg_bits = 32,
@@ -396,11 +395,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
 		.fast_io = true,
 	};
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-	if (!res)
-		return ERR_PTR(-ENODEV);
-
-	base = devm_ioremap_resource(&pdev->dev, res);
+	base = devm_platform_ioremap_resource_byname(pdev, name);
 	if (IS_ERR(base))
 		return ERR_CAST(base);
 
...
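
The llcc migration collapses the two-step lookup-and-map into one devm helper. A sketch of the pattern under a hypothetical probe function and resource name ("regs"); the helper returns an ERR_PTR() on failure, never NULL, so the check is IS_ERR():

    #include <linux/platform_device.h>
    #include <linux/io.h>
    #include <linux/err.h>

    /* Hypothetical probe; "regs" stands in for the real resource name. */
    static int demo_probe(struct platform_device *pdev)
    {
            void __iomem *base;

            /*
             * One call replaces platform_get_resource_byname() +
             * devm_ioremap_resource(), including the error-pointer return.
             */
            base = devm_platform_ioremap_resource_byname(pdev, "regs");
            if (IS_ERR(base))
                    return PTR_ERR(base);

            return 0;
    }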
@@ -8,6 +8,7 @@
 #define __RPM_INTERNAL_H__
 
 #include <linux/bitmap.h>
+#include <linux/wait.h>
 #include <soc/qcom/tcs.h>
 
 #define TCS_TYPE_NR			4
@@ -106,6 +107,8 @@ struct rpmh_ctrlr {
  * @lock:               Synchronize state of the controller. If RPMH's cache
  *                      lock will also be held, the order is: drv->lock then
  *                      cache_lock.
+ * @tcs_wait:           Wait queue used to wait for @tcs_in_use to free up a
+ *                      slot
  * @client:             Handle to the DRV's client.
  */
 struct rsc_drv {
@@ -118,6 +121,7 @@ struct rsc_drv {
 	struct tcs_group tcs[TCS_TYPE_NR];
 	DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
 	spinlock_t lock;
+	wait_queue_head_t tcs_wait;
 	struct rpmh_ctrlr client;
 };
 
...
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/wait.h>
 
 #include <soc/qcom/cmd-db.h>
 #include <soc/qcom/tcs.h>
@@ -453,6 +454,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
 		if (!drv->tcs[ACTIVE_TCS].num_tcs)
 			enable_tcs_irq(drv, i, false);
 		spin_unlock(&drv->lock);
+		wake_up(&drv->tcs_wait);
 		if (req)
 			rpmh_tx_done(req, err);
 	}
@@ -571,73 +573,34 @@ static int find_free_tcs(struct tcs_group *tcs)
 }
 
 /**
- * tcs_write() - Store messages into a TCS right now, or return -EBUSY.
+ * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
  * @drv: The controller.
+ * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
  * @msg: The data to be sent.
 *
- * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
+ * Claims a tcs in the given tcs_group while making sure that no existing cmd
+ * is in flight that would conflict with the one in @msg.
 *
- * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
- * the same address is already transferring returns -EBUSY which means the
- * client should retry shortly.
+ * Context: Must be called with the drv->lock held since that protects
+ * tcs_in_use.
 *
- * Return: 0 on success, -EBUSY if client should retry, or an error.
- * Client should have interrupts enabled for a bit before retrying.
+ * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
+ * or the tcs_group is full.
 */
-static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
+static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
+			     const struct tcs_request *msg)
 {
-	struct tcs_group *tcs;
-	int tcs_id;
-	unsigned long flags;
 	int ret;
 
-	tcs = get_tcs_for_msg(drv, msg);
-	if (IS_ERR(tcs))
-		return PTR_ERR(tcs);
-
-	spin_lock_irqsave(&drv->lock, flags);
 	/*
 	 * The h/w does not like if we send a request to the same address,
 	 * when one is already in-flight or being processed.
 	 */
 	ret = check_for_req_inflight(drv, tcs, msg);
 	if (ret)
-		goto unlock;
-
-	ret = find_free_tcs(tcs);
-	if (ret < 0)
-		goto unlock;
-	tcs_id = ret;
-
-	tcs->req[tcs_id - tcs->offset] = msg;
-	set_bit(tcs_id, drv->tcs_in_use);
-	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
-		/*
-		 * Clear previously programmed WAKE commands in selected
-		 * repurposed TCS to avoid triggering them. tcs->slots will be
-		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
-		 */
-		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
-		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
-		enable_tcs_irq(drv, tcs_id, true);
-	}
-	spin_unlock_irqrestore(&drv->lock, flags);
-
-	/*
-	 * These two can be done after the lock is released because:
-	 * - We marked "tcs_in_use" under lock.
-	 * - Once "tcs_in_use" has been marked nobody else could be writing
-	 *   to these registers until the interrupt goes off.
-	 * - The interrupt can't go off until we trigger w/ the last line
-	 *   of __tcs_set_trigger() below.
-	 */
-	__tcs_buffer_write(drv, tcs_id, 0, msg);
-	__tcs_set_trigger(drv, tcs_id, true);
+		return ret;
 
-	return 0;
-
-unlock:
-	spin_unlock_irqrestore(&drv->lock, flags);
-	return ret;
+	return find_free_tcs(tcs);
 }
 
 /**
@@ -664,18 +627,47 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
 */
 int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
 {
-	int ret;
-
-	do {
-		ret = tcs_write(drv, msg);
-		if (ret == -EBUSY) {
-			pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
-					    msg->cmds[0].addr);
-			udelay(10);
-		}
-	} while (ret == -EBUSY);
-
-	return ret;
+	struct tcs_group *tcs;
+	int tcs_id;
+	unsigned long flags;
+
+	tcs = get_tcs_for_msg(drv, msg);
+	if (IS_ERR(tcs))
+		return PTR_ERR(tcs);
+
+	spin_lock_irqsave(&drv->lock, flags);
+
+	/* Wait forever for a free tcs. It better be there eventually! */
+	wait_event_lock_irq(drv->tcs_wait,
+			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
+			    drv->lock);
+
+	tcs->req[tcs_id - tcs->offset] = msg;
+	set_bit(tcs_id, drv->tcs_in_use);
+	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+		/*
+		 * Clear previously programmed WAKE commands in selected
+		 * repurposed TCS to avoid triggering them. tcs->slots will be
+		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+		 */
+		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+		enable_tcs_irq(drv, tcs_id, true);
+	}
+	spin_unlock_irqrestore(&drv->lock, flags);
+
+	/*
+	 * These two can be done after the lock is released because:
+	 * - We marked "tcs_in_use" under lock.
+	 * - Once "tcs_in_use" has been marked nobody else could be writing
+	 *   to these registers until the interrupt goes off.
+	 * - The interrupt can't go off until we trigger w/ the last line
+	 *   of __tcs_set_trigger() below.
+	 */
+	__tcs_buffer_write(drv, tcs_id, 0, msg);
+	__tcs_set_trigger(drv, tcs_id, true);
+
+	return 0;
 }
 
 /**
@@ -983,6 +975,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
 		return ret;
 
 	spin_lock_init(&drv->lock);
+	init_waitqueue_head(&drv->tcs_wait);
 	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
 
 	irq = platform_get_irq(pdev, drv->id);
...
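
With these rpmh-rsc hunks the udelay(10) retry loop is gone: senders sleep on a waitqueue until a TCS frees up, and tcs_tx_done() wakes them. A minimal sketch of that waitqueue-plus-spinlock pattern, with hypothetical names (demo_*); wait_event_lock_irq() drops the lock while sleeping and re-takes it around each condition check:

    #include <linux/bitops.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct demo_drv {
            spinlock_t lock;
            wait_queue_head_t wait;
            unsigned long in_use;           /* bitmap of busy slots */
    };

    /* Returns a free slot id or -EBUSY; caller must hold d->lock. */
    static int demo_claim_slot(struct demo_drv *d)
    {
            int slot = find_first_zero_bit(&d->in_use, BITS_PER_LONG);

            return slot < BITS_PER_LONG ? slot : -EBUSY;
    }

    static int demo_send(struct demo_drv *d)
    {
            unsigned long flags;
            int slot;

            spin_lock_irqsave(&d->lock, flags);
            /* Sleeps instead of spinning; the lock is held when the check runs. */
            wait_event_lock_irq(d->wait,
                                (slot = demo_claim_slot(d)) >= 0,
                                d->lock);
            set_bit(slot, &d->in_use);
            spin_unlock_irqrestore(&d->lock, flags);

            return slot;    /* slot is owned until demo_complete() */
    }

    /* Completion path (e.g. an IRQ handler) frees the slot and wakes senders. */
    static void demo_complete(struct demo_drv *d, int slot)
    {
            unsigned long flags;

            spin_lock_irqsave(&d->lock, flags);
            clear_bit(slot, &d->in_use);
            spin_unlock_irqrestore(&d->lock, flags);
            wake_up(&d->wait);
    }

The struct members would be set up with spin_lock_init() and init_waitqueue_head(), as the probe hunk above does, and wake_up() runs after the lock is dropped, matching the tcs_tx_done() hunk, so a woken sender can immediately re-take the lock and claim the freed slot.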
@@ -194,6 +194,7 @@ static const struct soc_id soc_id[] = {
 	{ 186, "MSM8674" },
 	{ 194, "MSM8974PRO" },
 	{ 206, "MSM8916" },
+	{ 207, "MSM8994" },
 	{ 208, "APQ8074-AA" },
 	{ 209, "APQ8074-AB" },
 	{ 210, "APQ8074PRO" },
@@ -214,6 +215,8 @@ static const struct soc_id soc_id[] = {
 	{ 248, "MSM8216" },
 	{ 249, "MSM8116" },
 	{ 250, "MSM8616" },
+	{ 251, "MSM8992" },
+	{ 253, "APQ8094" },
 	{ 291, "APQ8096" },
 	{ 305, "MSM8996SG" },
 	{ 310, "MSM8996AU" },
@@ -223,6 +226,8 @@ static const struct soc_id soc_id[] = {
 	{ 321, "SDM845" },
 	{ 341, "SDA845" },
 	{ 356, "SM8250" },
+	{ 402, "IPQ6018" },
+	{ 425, "SC7180" },
 };
 
 static const char *socinfo_machine(struct device *dev, unsigned int id)
...
@@ -296,7 +296,7 @@ static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params)
 	u32 m_cmd;
 
 	m_cmd = (cmd << M_OPCODE_SHFT) | (params & M_PARAMS_MSK);
-	writel_relaxed(m_cmd, se->base + SE_GENI_M_CMD0);
+	writel(m_cmd, se->base + SE_GENI_M_CMD0);
 }
 
 /**
@@ -316,7 +316,7 @@ static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params)
 	s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
 	s_cmd |= (cmd << S_OPCODE_SHFT);
 	s_cmd |= (params & S_PARAMS_MSK);
-	writel_relaxed(s_cmd, se->base + SE_GENI_S_CMD0);
+	writel(s_cmd, se->base + SE_GENI_S_CMD0);
 }
 
 /**
...
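
The geni-se hunks trade writel_relaxed() for writel() when arming a command. The plain accessor adds the barrier that orders earlier normal-memory stores before the MMIO write, so the engine cannot start a command ahead of the data set up for it. A minimal sketch of the distinction, with hypothetical names (demo_kick, DEMO_REG_CMD):

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_REG_CMD    0x600   /* hypothetical command register offset */

    /* Hypothetical trigger: publish setup data, then write the command. */
    static void demo_kick(void __iomem *regs, u32 *setup, u32 cmd)
    {
            setup[0] = cmd;                 /* plain store to normal memory */
            /*
             * writel() = barrier + write: the store above is guaranteed
             * visible to the device before the command register write;
             * writel_relaxed() gives no ordering against normal memory.
             */
            writel(cmd, regs + DEMO_REG_CMD);
    }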