Commit 41bb23e7 authored by Lu Baolu, committed by Joerg Roedel

iommu: Remove unused argument in is_attach_deferred

The is_attach_deferred iommu_ops callback is a device op. The domain
argument is unnecessary and never used. Remove it to clean up the code.
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20220216025249.3459465-9-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 3f6634d9
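
In short, the callback and every caller drop the domain parameter. The before/after prototypes, taken from the include/linux/iommu.h hunk below, are:

    /* Before: a domain was passed but never consulted. */
    bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

    /* After: a pure device op. */
    bool (*is_attach_deferred)(struct device *dev);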
drivers/iommu/amd/amd_iommu.h
@@ -116,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
 extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-					 struct device *dev);
+extern bool amd_iommu_is_attach_deferred(struct device *dev);
 extern int __init add_special_device(u8 type, u8 id, u16 *devid,
				     bool cmd_line);
drivers/iommu/amd/iommu.c
@@ -2215,8 +2215,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 	list_add_tail(&region->list, head);
 }

-bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-				  struct device *dev)
+bool amd_iommu_is_attach_deferred(struct device *dev)
 {
 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
drivers/iommu/amd/iommu_v2.c
@@ -537,7 +537,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
 	ret = NOTIFY_DONE;

 	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
-	if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
+	if (amd_iommu_is_attach_deferred(&pdev->dev)) {
 		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
 		goto out;
drivers/iommu/intel/iommu.c
@@ -5052,8 +5052,7 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
 	}
 }

-static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
-					   struct device *dev)
+static bool intel_iommu_is_attach_deferred(struct device *dev)
 {
 	return attach_deferred(dev);
 }
drivers/iommu/iommu.c
@@ -831,13 +831,12 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 	return ret;
 }

-static bool iommu_is_attach_deferred(struct iommu_domain *domain,
-				     struct device *dev)
+static bool iommu_is_attach_deferred(struct device *dev)
 {
 	const struct iommu_ops *ops = dev_iommu_ops(dev);

 	if (ops->is_attach_deferred)
-		return ops->is_attach_deferred(domain, dev);
+		return ops->is_attach_deferred(dev);

 	return false;
 }
@@ -894,7 +893,7 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 	mutex_lock(&group->mutex);
 	list_add_tail(&device->list, &group->devices);
-	if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
+	if (group->domain && !iommu_is_attach_deferred(dev))
 		ret = __iommu_attach_device(group->domain, dev);
 	mutex_unlock(&group->mutex);
 	if (ret)
@@ -1745,7 +1744,7 @@ static int iommu_group_do_dma_attach(struct device *dev, void *data)
 	struct iommu_domain *domain = data;
 	int ret = 0;

-	if (!iommu_is_attach_deferred(domain, dev))
+	if (!iommu_is_attach_deferred(dev))
 		ret = __iommu_attach_device(domain, dev);

 	return ret;
@@ -2020,9 +2019,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 {
-	const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-	if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
+	if (iommu_is_attach_deferred(dev))
 		return __iommu_attach_device(domain, dev);

 	return 0;
@@ -2031,7 +2028,7 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
 {
-	if (iommu_is_attach_deferred(domain, dev))
+	if (iommu_is_attach_deferred(dev))
 		return;

 	if (unlikely(domain->ops->detach_dev == NULL))
include/linux/iommu.h
@@ -269,7 +269,7 @@ struct iommu_ops {
 	void (*put_resv_regions)(struct device *dev, struct list_head *list);

 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
-	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
+	bool (*is_attach_deferred)(struct device *dev);

 	/* Per device IOMMU features */
 	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
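
For illustration only (not part of this commit): a minimal sketch of a driver implementing the new device-only op, assuming the common pattern of stashing per-device state behind dev_iommu_priv_get(). The names my_dev_data, my_iommu_is_attach_deferred and my_iommu_ops are hypothetical.

    #include <linux/iommu.h>

    struct my_dev_data {
	    bool defer_attach;	/* e.g. set at probe time when a kdump kernel must keep the inherited translation */
    };

    static bool my_iommu_is_attach_deferred(struct device *dev)
    {
	    /* The decision keys off per-device state only; no domain is needed. */
	    struct my_dev_data *data = dev_iommu_priv_get(dev);

	    return data ? data->defer_attach : false;
    }

    static const struct iommu_ops my_iommu_ops = {
	    /* ... other callbacks elided ... */
	    .is_attach_deferred = my_iommu_is_attach_deferred,
    };

The core resolves the op through dev_iommu_ops(dev), as the drivers/iommu/iommu.c hunk above shows, so the check can be made before any domain has been attached to the device.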