Commit adfec2e7 authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Convert to iommu_fwspec

In the final step of preparation for full generic configuration support,
swap our fixed-size master_cfg for the generic iommu_fwspec. For the
legacy DT bindings, the driver simply gets to act as its own 'firmware'.
Farewell, arbitrary MAX_MASTER_STREAMIDS!
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 588888a7
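
For context, the pattern this patch moves to can be condensed into a short sketch (illustration only, not part of the patch): the per-device iommu_fwspec records the raw stream IDs via iommu_fwspec_init()/iommu_fwspec_add_ids(), and the driver's per-master state hangs off fwspec->iommu_priv, sized by fwspec->num_ids rather than a fixed MAX_MASTER_STREAMIDS. The helper name example_describe_master() below is made up for illustration; the fwspec calls and fields are the 4.9-era kernel API used in the diff, and the arm_smmu_* types are the driver's own.

/*
 * Illustration only -- a condensed sketch of the iommu_fwspec flow this
 * patch adopts, assuming the driver's arm_smmu_* definitions from the diff.
 */
static int example_describe_master(struct device *dev,
				   struct arm_smmu_device *smmu,
				   u32 *sids, int num_sids)
{
	struct iommu_fwspec *fwspec;
	struct arm_smmu_master_cfg *cfg;
	int i, err;

	/* Bind the master to the SMMU's firmware node and its iommu_ops */
	err = iommu_fwspec_init(dev, &smmu->dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	/* Record the raw stream IDs; fwspec->ids[] replaces streamids[] */
	err = iommu_fwspec_add_ids(dev, sids, num_sids);
	if (err)
		return err;

	/*
	 * Per-master driver state now hangs off fwspec->iommu_priv and is
	 * sized by fwspec->num_ids instead of MAX_MASTER_STREAMIDS.
	 */
	fwspec = dev->iommu_fwspec;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg,
			       smendx[fwspec->num_ids]), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	for (i = 0; i < fwspec->num_ids; i++)
		cfg->smendx[i] = INVALID_SMENDX;

	return 0;
}

Sizing the trailing smendx[] array by fwspec->num_ids is what lets the arbitrary MAX_MASTER_STREAMIDS limit disappear in the diff below.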
@@ -42,6 +42,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_iommu.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -51,9 +52,6 @@
 
 #include "io-pgtable.h"
 
-/* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS		128
-
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS		128
@@ -321,13 +319,13 @@ struct arm_smmu_smr {
 
 struct arm_smmu_master_cfg {
 	struct arm_smmu_device		*smmu;
-	int				num_streamids;
-	u16				streamids[MAX_MASTER_STREAMIDS];
-	s16				smendx[MAX_MASTER_STREAMIDS];
+	s16				smendx[];
 };
 #define INVALID_SMENDX			-1
 
-#define for_each_cfg_sme(cfg, i, idx) \
-	for (i = 0; idx = cfg->smendx[i], i < cfg->num_streamids; ++i)
+#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
+#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
+#define for_each_cfg_sme(fw, i, idx) \
+	for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
 
 struct arm_smmu_device {
 	struct device			*dev;
@@ -480,14 +478,16 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
 }
 
 static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
 
-static int arm_smmu_register_legacy_master(struct device *dev)
+static int arm_smmu_register_legacy_master(struct device *dev,
+					   struct arm_smmu_device **smmu)
 {
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
+	struct device *smmu_dev;
 	struct device_node *np;
 	struct of_phandle_iterator it;
 	void *data = &it;
+	u32 *sids;
 	__be32 pci_sid;
 	int err;
@@ -500,20 +500,13 @@ static int arm_smmu_register_legacy_master(struct device *dev)
 	it.node = np;
 	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
 				     __find_legacy_master_phandle);
+	smmu_dev = data;
 	of_node_put(np);
 	if (err == 0)
 		return -ENODEV;
 	if (err < 0)
 		return err;
 
-	smmu = dev_get_drvdata(data);
-
-	if (it.cur_count > MAX_MASTER_STREAMIDS) {
-		dev_err(smmu->dev,
-			"reached maximum number (%d) of stream IDs for master device %s\n",
-			MAX_MASTER_STREAMIDS, dev_name(dev));
-		return -ENOSPC;
-	}
-
 	if (dev_is_pci(dev)) {
 		/* "mmu-masters" assumes Stream ID == Requester ID */
 		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
@@ -522,17 +515,20 @@ static int arm_smmu_register_legacy_master(struct device *dev)
 		it.cur_count = 1;
 	}
 
-	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-	if (!cfg)
-		return -ENOMEM;
-
-	cfg->smmu = smmu;
-	dev->archdata.iommu = cfg;
+	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
+				&arm_smmu_ops);
+	if (err)
+		return err;
 
-	while (it.cur_count--)
-		cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++);
+	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
+	if (!sids)
+		return -ENOMEM;
 
-	return 0;
+	*smmu = dev_get_drvdata(smmu_dev);
+	of_phandle_iterator_args(&it, sids, it.cur_count);
+	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
+	kfree(sids);
+	return err;
 }
 
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -1127,7 +1123,8 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
 
 static int arm_smmu_master_alloc_smes(struct device *dev)
 {
-	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
 	struct arm_smmu_device *smmu = cfg->smmu;
 	struct arm_smmu_smr *smrs = smmu->smrs;
 	struct iommu_group *group;
@@ -1135,19 +1132,19 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
 	mutex_lock(&smmu->stream_map_mutex);
 	/* Figure out a viable stream map entry allocation */
-	for_each_cfg_sme(cfg, i, idx) {
+	for_each_cfg_sme(fwspec, i, idx) {
 		if (idx != INVALID_SMENDX) {
 			ret = -EEXIST;
 			goto out_err;
 		}
 
-		ret = arm_smmu_find_sme(smmu, cfg->streamids[i], 0);
+		ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0);
 		if (ret < 0)
 			goto out_err;
 
 		idx = ret;
 		if (smrs && smmu->s2crs[idx].count == 0) {
-			smrs[idx].id = cfg->streamids[i];
+			smrs[idx].id = fwspec->ids[i];
 			smrs[idx].mask = 0; /* We don't currently share SMRs */
 			smrs[idx].valid = true;
 		}
@@ -1165,7 +1162,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
 	iommu_group_put(group);
 
 	/* It worked! Now, poke the actual hardware */
-	for_each_cfg_sme(cfg, i, idx) {
+	for_each_cfg_sme(fwspec, i, idx) {
 		arm_smmu_write_sme(smmu, idx);
 		smmu->s2crs[idx].group = group;
 	}
@@ -1182,13 +1179,14 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
 	return ret;
 }
 
-static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
 {
-	struct arm_smmu_device *smmu = cfg->smmu;
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
 	int i, idx;
 
 	mutex_lock(&smmu->stream_map_mutex);
-	for_each_cfg_sme(cfg, i, idx) {
+	for_each_cfg_sme(fwspec, i, idx) {
 		if (arm_smmu_free_sme(smmu, idx))
 			arm_smmu_write_sme(smmu, idx);
 		cfg->smendx[i] = INVALID_SMENDX;
@@ -1197,7 +1195,7 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
 }
 
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
-				      struct arm_smmu_master_cfg *cfg)
+				      struct iommu_fwspec *fwspec)
 {
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
@@ -1214,7 +1212,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
 		type = S2CR_TYPE_BYPASS;
 
-	for_each_cfg_sme(cfg, i, idx) {
+	for_each_cfg_sme(fwspec, i, idx) {
 		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
 			continue;
@@ -1229,16 +1227,18 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
 
-	if (!cfg) {
+	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
 		return -ENXIO;
 	}
 
+	smmu = fwspec_smmu(fwspec);
 	/* Ensure that the domain is finalised */
-	ret = arm_smmu_init_domain_context(domain, cfg->smmu);
+	ret = arm_smmu_init_domain_context(domain, smmu);
 	if (ret < 0)
 		return ret;
...@@ -1246,15 +1246,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -1246,15 +1246,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't support domains across * Sanity check the domain. We don't support domains across
* different SMMUs. * different SMMUs.
*/ */
if (smmu_domain->smmu != cfg->smmu) { if (smmu_domain->smmu != smmu) {
dev_err(dev, dev_err(dev,
"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev)); dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
return -EINVAL; return -EINVAL;
} }
/* Looks ok, so add the device to the domain */ /* Looks ok, so add the device to the domain */
return arm_smmu_domain_add_master(smmu_domain, cfg); return arm_smmu_domain_add_master(smmu_domain, fwspec);
} }
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
@@ -1375,57 +1375,72 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 
 static int arm_smmu_add_device(struct device *dev)
 {
+	struct arm_smmu_device *smmu;
 	struct arm_smmu_master_cfg *cfg;
+	struct iommu_fwspec *fwspec;
 	int i, ret;
 
-	ret = arm_smmu_register_legacy_master(dev);
-	cfg = dev->archdata.iommu;
+	ret = arm_smmu_register_legacy_master(dev, &smmu);
+	fwspec = dev->iommu_fwspec;
 	if (ret)
 		goto out_free;
 
 	ret = -EINVAL;
-	for (i = 0; i < cfg->num_streamids; i++) {
-		u16 sid = cfg->streamids[i];
+	for (i = 0; i < fwspec->num_ids; i++) {
+		u16 sid = fwspec->ids[i];
 
-		if (sid & ~cfg->smmu->streamid_mask) {
+		if (sid & ~smmu->streamid_mask) {
 			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
-				sid, cfg->smmu->streamid_mask);
+				sid, smmu->streamid_mask);
 			goto out_free;
 		}
-		cfg->smendx[i] = INVALID_SMENDX;
 	}
 
+	ret = -ENOMEM;
+	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
+		      GFP_KERNEL);
+	if (!cfg)
+		goto out_free;
+
+	cfg->smmu = smmu;
+	fwspec->iommu_priv = cfg;
+	while (i--)
+		cfg->smendx[i] = INVALID_SMENDX;
+
 	ret = arm_smmu_master_alloc_smes(dev);
-	if (!ret)
-		return ret;
+	if (ret)
+		goto out_free;
+
+	return 0;
 
 out_free:
-	kfree(cfg);
-	dev->archdata.iommu = NULL;
+	if (fwspec)
+		kfree(fwspec->iommu_priv);
+	iommu_fwspec_free(dev);
 	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
-	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 
-	if (!cfg)
+	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return;
 
-	arm_smmu_master_free_smes(cfg);
+	arm_smmu_master_free_smes(fwspec);
 	iommu_group_remove_device(dev);
-	kfree(cfg);
-	dev->archdata.iommu = NULL;
+	kfree(fwspec->iommu_priv);
+	iommu_fwspec_free(dev);
 }
 
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
-	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
-	struct arm_smmu_device *smmu = cfg->smmu;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
 	struct iommu_group *group = NULL;
 	int i, idx;
 
-	for_each_cfg_sme(cfg, i, idx) {
+	for_each_cfg_sme(fwspec, i, idx) {
 		if (group && smmu->s2crs[idx].group &&
 		    group != smmu->s2crs[idx].group)
 			return ERR_PTR(-EINVAL);
@@ -1936,6 +1951,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		}
 	}
 
+	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
 	platform_set_drvdata(pdev, smmu);
 	arm_smmu_device_reset(smmu);
 	return 0;