Commit 5a5846fd authored by Greg Kroah-Hartman

Merge tag 'icc-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/djakov/icc into char-misc-next

Georgi writes:

interconnect changes for 5.16

Here are the changes for the 5.16-rc1 merge window, consisting of driver
updates only. The highlight is the refactoring of some existing drivers
into common code and the expansion of some macros, which will make adding
QoS support much easier.

Driver changes:
- icc-rpm: move bus clocks handling into qnoc_probe
- sdm660: expand DEFINE_QNODE macros
- sdm660: drop default/unused values
- sdm660: merge common code into icc-rpm
- icc-rpm: add support for QoS reg offset
- msm8916: expand DEFINE_QNODE macros
- msm8916: add support for AP-owned nodes
- msm8939: expand DEFINE_QNODE macros
- msm8939: add support for AP-owned nodes
- qcs404: expand DEFINE_QNODE macros
- qcom: drop DEFINE_QNODE macro
- samsung: describe drivers in KConfig
Signed-off-by: Georgi Djakov <djakov@kernel.org>

* tag 'icc-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/djakov/icc:
  interconnect: samsung: describe drivers in KConfig
  interconnect: qcom: drop DEFINE_QNODE macro
  interconnect: qcs404: expand DEFINE_QNODE macros
  interconnect: msm8939: add support for AP-owned nodes
  interconnect: msm8939: expand DEFINE_QNODE macros
  interconnect: msm8916: add support for AP-owned nodes
  interconnect: msm8916: expand DEFINE_QNODE macros
  interconnect: icc-rpm: add support for QoS reg offset
  interconnect: sdm660: merge common code into icc-rpm
  interconnect: sdm660: drop default/unused values
  interconnect: sdm660: expand DEFINE_QNODE macros
  interconnect: icc-rpm: move bus clocks handling into qnoc_probe
parents b1f4c00e dfe14674
drivers/interconnect/qcom/icc-rpm.c
@@ -11,60 +11,231 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "smd-rpm.h"
#include "icc-rpm.h"
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
/* BIMC QoS */
#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n) (M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))
#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK 0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK 0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f
#define M_BKE_EN_EN_BMASK 0x1
/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK 0xc
#define NOC_QOS_PRIORITY_P0_MASK 0x3
#define NOC_QOS_PRIORITY_P1_SHIFT 0x2
#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK 0x3
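The BKE and NoC QoS registers are banked per QoS port, so the macros above are pure stride arithmetic. As an illustration (not part of the patch), here is what they evaluate to for a hypothetical qos_port of 2:

/*
 * Worked example, illustration only, assuming qos_port = 2:
 *
 *   M_BKE_REG_BASE(2)           = 0x300 + 2 * 0x4000      = 0x8300
 *   M_BKE_EN_ADDR(2)            = 0x8300
 *   M_BKE_HEALTH_CFG_ADDR(1, 2) = 0x8300 + 0x40 + 1 * 0x4 = 0x8344
 *   NOC_QOS_PRIORITYn_ADDR(2)   = 0x8 + 2 * 0x1000        = 0x2008
 *   NOC_QOS_MODEn_ADDR(2)       = 0xc + 2 * 0x1000        = 0x200c
 *
 * The driver adds the SoC-specific qp->qos_offset on top of these before
 * every regmap access, so the absolute MMIO offset also depends on the
 * qcom_icc_desc describing the bus.
 */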
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
struct qcom_icc_qos *qos,
int regnum)
{
u32 val;
u32 mask;
val = qos->prio_level;
mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
if (regnum != 3) {
val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
}
return regmap_update_bits(qp->regmap,
qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
mask, val);
}
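The masks above place prio_level in bits [1:0], areq_prio in bits [9:8] and the limit-commands enable in bit 31 of M_BKE_HEALTH_CFGn. A worked example of the val/mask pair handed to regmap_update_bits(), for the hypothetical settings prio_level = 1, areq_prio = 2, limit_commands = true on a register other than HEALTH_3:

/*
 * Illustration only, hypothetical QoS settings:
 *
 *   val  = 1 | (2 << 8) | (1 << 31)  = 0x80000201
 *   mask = 0x3 | 0x300 | 0x80000000  = 0x80000303
 *
 * regmap_update_bits() then does a read-modify-write that touches only the
 * PRIOLVL, AREQPRIO and LIMITCMDS fields and leaves the rest of the register
 * untouched. For M_BKE_HEALTH_3 (regnum == 3) the LIMITCMDS bit is skipped.
 */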
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
struct icc_provider *provider;
struct icc_node *n;
u64 sum_bw;
u64 max_peak_bw;
u64 rate;
u32 agg_avg = 0;
u32 agg_peak = 0;
int ret, i;
u32 mode = NOC_QOS_MODE_BYPASS;
u32 val = 0;
int i, rc = 0;
qn = src->data;
provider = src->provider;
qp = to_qcom_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
&agg_avg, &agg_peak);
if (qn->qos.qos_mode != -1)
mode = qn->qos.qos_mode;
/* QoS Priority: The QoS Health parameters are getting considered
* only if we are NOT in Bypass Mode.
*/
if (mode != NOC_QOS_MODE_BYPASS) {
for (i = 3; i >= 0; i--) {
rc = qcom_icc_bimc_set_qos_health(qp,
&qn->qos, i);
if (rc)
return rc;
}
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
val = 1;
}
return regmap_update_bits(qp->regmap,
qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
M_BKE_EN_EN_BMASK, val);
}
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
struct qcom_icc_qos *qos)
{
u32 val;
int rc;
/* Must be updated one at a time, P1 first, P0 last */
val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
rc = regmap_update_bits(qp->regmap,
qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
NOC_QOS_PRIORITY_P1_MASK, val);
if (rc)
return rc;
return regmap_update_bits(qp->regmap,
qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}
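The two priority fields share one register but must be written P1 first and P0 last, which is why the function issues two separate masked updates. For illustration, with the hypothetical values areq_prio = 1 and prio_level = 2:

/*
 * Illustration only, hypothetical values:
 *
 *   1st update: mask = NOC_QOS_PRIORITY_P1_MASK (0xc),
 *               val  = 1 << NOC_QOS_PRIORITY_P1_SHIFT = 0x4
 *   2nd update: mask = NOC_QOS_PRIORITY_P0_MASK (0x3),
 *               val  = 2
 *
 * After both writes, bits [3:0] of NOC_QOS_PRIORITYn read back as 0x6,
 * i.e. P1 = 1 and P0 = 2, and neither write clobbers the other field.
 */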
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
struct icc_provider *provider;
u32 mode = NOC_QOS_MODE_BYPASS;
int rc = 0;
qn = src->data;
provider = src->provider;
qp = to_qcom_provider(provider);
if (qn->qos.qos_port < 0) {
dev_dbg(src->provider->dev,
"NoC QoS: Skipping %s: vote aggregated on parent.\n",
qn->name);
return 0;
}
if (qn->qos.qos_mode != -1)
mode = qn->qos.qos_mode;
if (mode == NOC_QOS_MODE_FIXED) {
dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
qn->name);
rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
if (rc)
return rc;
} else if (mode == NOC_QOS_MODE_BYPASS) {
dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
qn->name);
}
return regmap_update_bits(qp->regmap,
qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
NOC_QOS_MODEn_MASK, mode);
}
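The mode itself lands in bits [1:0] of NOC_QOS_MODEn. As a small illustration for a hypothetical node on qos_port 1:

/*
 * Illustration only: with qos_port = 1 and NOC_QOS_MODE_FIXED (0x0) the
 * final call amounts to
 *
 *   regmap_update_bits(qp->regmap, qp->qos_offset + 0x100c, 0x3, 0x0);
 *
 * A node left in NOC_QOS_MODE_BYPASS (0x2) writes 0x2 into the same two
 * bits instead, and its priority register is not touched at all.
 */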
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
struct qcom_icc_node *qn = node->data;
dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
/* send bandwidth request message to the RPM processor */
if (qn->mas_rpm_id != -1) {
if (qp->is_bimc_node)
return qcom_icc_set_bimc_qos(node, sum_bw);
return qcom_icc_set_noc_qos(node, sum_bw);
}
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
int ret = 0;
if (mas_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
RPM_BUS_MASTER_REQ,
qn->mas_rpm_id,
mas_rpm_id,
sum_bw);
if (ret) {
pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
qn->mas_rpm_id, ret);
mas_rpm_id, ret);
return ret;
}
}
if (qn->slv_rpm_id != -1) {
if (slv_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
RPM_BUS_SLAVE_REQ,
qn->slv_rpm_id,
slv_rpm_id,
sum_bw);
if (ret) {
pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
qn->slv_rpm_id, ret);
slv_rpm_id, ret);
return ret;
}
}
return ret;
}
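qcom_icc_rpm_set() sends at most two SMD votes, one per valid RPM id, both in QCOM_SMD_RPM_ACTIVE_STATE with the aggregated bandwidth as payload. For illustration, with hypothetical ids:

/*
 * Illustration only, hypothetical RPM ids:
 *
 *   qcom_icc_rpm_set(3, -1, sum_bw);    sends one RPM_BUS_MASTER_REQ vote
 *   qcom_icc_rpm_set(-1, 5, sum_bw);    sends one RPM_BUS_SLAVE_REQ vote
 *   qcom_icc_rpm_set(-1, -1, sum_bw);   sends nothing and returns 0
 */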
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
struct icc_provider *provider;
struct icc_node *n;
u64 sum_bw;
u64 max_peak_bw;
u64 rate;
u32 agg_avg = 0;
u32 agg_peak = 0;
int ret, i;
qn = src->data;
provider = src->provider;
qp = to_qcom_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
&agg_avg, &agg_peak);
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
if (!qn->qos.ap_owned) {
/* send bandwidth request message to the RPM processor */
ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
if (ret)
return ret;
} else if (qn->qos.qos_mode != -1) {
/* set bandwidth directly from the AP */
ret = qcom_icc_qos_set(src, sum_bw);
if (ret)
return ret;
}
rate = max(sum_bw, max_peak_bw);
do_div(rate, qn->buswidth);
@@ -86,8 +257,11 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
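The requested clock rate is derived from the aggregated bandwidth and the node's bus width; the remainder of qcom_icc_set() (outside this hunk) programs that rate into the bus clocks. A worked example with hypothetical numbers, assuming icc_units_to_bps() scales the framework's kB/s values to bytes per second:

/*
 * Illustration only:
 *
 *   agg_avg  = 800000 kB/s  ->  sum_bw      = 800000000 B/s
 *   agg_peak = 640000 kB/s  ->  max_peak_bw = 640000000 B/s
 *
 *   rate = max(sum_bw, max_peak_bw) / buswidth
 *        = 800000000 / 8
 *        = 100000000 Hz, i.e. a 100 MHz bus clock request
 *
 * do_div() is used for the division because rate is a u64 and 32-bit
 * architectures have no native 64-bit divide.
 */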
int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
const struct clk_bulk_data *cd)
static const char * const bus_clocks[] = {
"bus", "bus_a",
};
int qnoc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct qcom_icc_desc *desc;
@@ -97,6 +271,8 @@ int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
const char * const *cds;
int cd_num;
int ret;
/* wait for the RPM proxy */
@@ -110,7 +286,15 @@ int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
if (desc->num_clocks) {
cds = desc->clocks;
cd_num = desc->num_clocks;
} else {
cds = bus_clocks;
cd_num = ARRAY_SIZE(bus_clocks);
}
qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
if (!qp)
return -ENOMEM;
@@ -119,12 +303,35 @@ int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
if (!data)
return -ENOMEM;
qp->bus_clks = devm_kmemdup(dev, cd, cd_size,
GFP_KERNEL);
if (!qp->bus_clks)
return -ENOMEM;
for (i = 0; i < cd_num; i++)
qp->bus_clks[i].id = cds[i];
qp->num_clks = cd_num;
qp->is_bimc_node = desc->is_bimc_node;
qp->qos_offset = desc->qos_offset;
if (desc->regmap_cfg) {
struct resource *res;
void __iomem *mmio;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
mmio = devm_ioremap_resource(dev, res);
if (IS_ERR(mmio)) {
dev_err(dev, "Cannot ioremap interconnect bus resource\n");
return PTR_ERR(mmio);
}
qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
if (IS_ERR(qp->regmap)) {
dev_err(dev, "Cannot regmap interconnect bus resource\n");
return PTR_ERR(qp->regmap);
}
}
ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
if (ret)
return ret;
......
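With the common code above, a per-SoC driver no longer passes a clk_bulk_data table into qnoc_probe(); it only fills in a qcom_icc_desc and wires qnoc_probe()/qnoc_remove() straight into its platform_driver. A minimal sketch of such a consumer, with hypothetical node table, register values and compatible string (not taken from any real driver):

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "icc-rpm.h"

/* Sketch only: node definitions elided, names and values are hypothetical. */
static struct qcom_icc_node *example_snoc_nodes[] = {
	/* &mas_example, &slv_example, ... */
};

static const struct regmap_config example_snoc_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x14000,	/* hypothetical */
	.fast_io	= true,
};

static struct qcom_icc_desc example_snoc = {
	.nodes		= example_snoc_nodes,
	.num_nodes	= ARRAY_SIZE(example_snoc_nodes),
	.regmap_cfg	= &example_snoc_regmap_config,
	.qos_offset	= 0x7000,	/* hypothetical */
	/* .clocks/.num_clocks left unset: qnoc_probe() falls back to "bus"/"bus_a" */
};

static const struct of_device_id example_noc_of_match[] = {
	{ .compatible = "qcom,example-snoc", .data = &example_snoc },
	{ }
};
MODULE_DEVICE_TABLE(of, example_noc_of_match);

static struct platform_driver example_noc_driver = {
	.probe = qnoc_probe,
	.remove = qnoc_remove,
	.driver = {
		.name = "qnoc-example",
		.of_match_table = example_noc_of_match,
	},
};
module_platform_driver(example_noc_driver);

MODULE_LICENSE("GPL v2");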
drivers/interconnect/qcom/icc-rpm.h
@@ -9,8 +9,6 @@
#define RPM_BUS_MASTER_REQ 0x73616d62
#define RPM_BUS_SLAVE_REQ 0x766c7362
#define QCOM_MAX_LINKS 12
#define to_qcom_provider(_provider) \
container_of(_provider, struct qcom_icc_provider, provider)
@@ -19,11 +17,35 @@
* @provider: generic interconnect provider
* @bus_clks: the clk_bulk_data table of bus clocks
* @num_clks: the total number of clk_bulk_data entries
* @is_bimc_node: indicates whether to use bimc specific setting
* @qos_offset: offset to QoS registers
* @regmap: regmap for QoS registers read/write access
*/
struct qcom_icc_provider {
struct icc_provider provider;
struct clk_bulk_data *bus_clks;
int num_clks;
bool is_bimc_node;
struct regmap *regmap;
unsigned int qos_offset;
struct clk_bulk_data bus_clks[];
};
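bus_clks is now a flexible array member, so the provider struct and its clock table come from one allocation sized with struct_size() in qnoc_probe() above. For illustration:

/*
 * Illustration only: with the default two bus clocks ("bus" and "bus_a"),
 *
 *   struct_size(qp, bus_clks, 2)
 *     == sizeof(struct qcom_icc_provider) + 2 * sizeof(struct clk_bulk_data)
 *
 * and, unlike open-coded arithmetic, struct_size() saturates to SIZE_MAX on
 * overflow, which is why it is used for the devm_kzalloc() in qnoc_probe().
 */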
/**
* struct qcom_icc_qos - Qualcomm specific interconnect QoS parameters
* @areq_prio: node requests priority
* @prio_level: priority level for bus communication
* @limit_commands: activate/deactivate limiter mode during runtime
* @ap_owned: indicates if the node is owned by the AP or by the RPM
* @qos_mode: default qos mode for this node
* @qos_port: qos port number for finding qos registers of this node
*/
struct qcom_icc_qos {
u32 areq_prio;
u32 prio_level;
bool limit_commands;
bool ap_owned;
int qos_mode;
int qos_port;
};
/**
@@ -35,39 +57,37 @@ struct qcom_icc_provider {
* @buswidth: width of the interconnect between a node and the bus (bytes)
* @mas_rpm_id: RPM id for devices that are bus masters
* @slv_rpm_id: RPM id for devices that are bus slaves
* @qos: NoC QoS setting parameters
* @rate: current bus clock rate in Hz
*/
struct qcom_icc_node {
unsigned char *name;
u16 id;
u16 links[QCOM_MAX_LINKS];
const u16 *links;
u16 num_links;
u16 buswidth;
int mas_rpm_id;
int slv_rpm_id;
struct qcom_icc_qos qos;
u64 rate;
};
struct qcom_icc_desc {
struct qcom_icc_node **nodes;
size_t num_nodes;
const char * const *clocks;
size_t num_clocks;
bool is_bimc_node;
const struct regmap_config *regmap_cfg;
unsigned int qos_offset;
};
#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
...) \
static struct qcom_icc_node _name = { \
.name = #_name, \
.id = _id, \
.buswidth = _buswidth, \
.mas_rpm_id = _mas_rpm_id, \
.slv_rpm_id = _slv_rpm_id, \
.num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
.links = { __VA_ARGS__ }, \
}
/* Valid for both NoC and BIMC */
#define NOC_QOS_MODE_INVALID -1
#define NOC_QOS_MODE_FIXED 0x0
#define NOC_QOS_MODE_BYPASS 0x2
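With DEFINE_QNODE dropped, each node is spelled out as a plain initializer, which is what makes room for the new per-node qcom_icc_qos settings. A sketch of what an expanded, AP-owned node can look like; the names and values below are hypothetical, loosely modelled on the msm8916/msm8939 conversions in this series:

static const u16 mas_example_links[] = {
	EXAMPLE_SLAVE_ID,		/* hypothetical link target */
};

static struct qcom_icc_node mas_example = {
	.name = "mas_example",
	.id = EXAMPLE_MASTER_ID,	/* hypothetical */
	.buswidth = 8,
	.mas_rpm_id = -1,		/* AP-owned node: no RPM vote */
	.slv_rpm_id = -1,
	.qos.ap_owned = true,
	.qos.qos_mode = NOC_QOS_MODE_FIXED,
	.qos.areq_prio = 0,
	.qos.prio_level = 0,
	.qos.qos_port = 0,
	.num_links = ARRAY_SIZE(mas_example_links),
	.links = mas_example_links,
};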
int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
const struct clk_bulk_data *cd);
int qnoc_probe(struct platform_device *pdev);
int qnoc_remove(struct platform_device *pdev);
#endif
drivers/interconnect/samsung/Kconfig
@@ -6,8 +6,10 @@ config INTERCONNECT_SAMSUNG
Interconnect drivers for Samsung SoCs.
config INTERCONNECT_EXYNOS
tristate "Exynos generic interconnect driver"
tristate "Exynos SoC generic interconnect driver"
depends on INTERCONNECT_SAMSUNG
default y if ARCH_EXYNOS
help
Generic interconnect driver for Exynos SoCs.
Generic interconnect driver for Samsung Exynos SoCs (e.g. Exynos3250,
Exynos4210, Exynos4412, Exynos542x, Exynos5433).
Choose Y here only if you build for such Samsung SoC.