Commit 0095bf83 authored by Lu Baolu's avatar Lu Baolu Committed by Joerg Roedel

iommu: Improve iopf_queue_remove_device()

Convert iopf_queue_remove_device() to return void instead of an error code,
as the return value is never used. This removal helper is designed to be
never-failed, so there's no need for error handling.

Ack all outstanding page requests from the device with the response code of
IOMMU_PAGE_RESP_INVALID, indicating that the device should not attempt any retry.

Add comments to this helper explaining the steps involved in removing a
device from the iopf queue and disabling its PRI. The individual drivers
are expected to be adjusted accordingly. Here we just define the expected
behaviors of the individual iommu driver from the core's perspective.
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Link: https://lore.kernel.org/r/20240212012227.119381-14-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent a74c077b
...@@ -4455,12 +4455,7 @@ static int intel_iommu_disable_iopf(struct device *dev) ...@@ -4455,12 +4455,7 @@ static int intel_iommu_disable_iopf(struct device *dev)
*/ */
pci_disable_pri(to_pci_dev(dev)); pci_disable_pri(to_pci_dev(dev));
info->pri_enabled = 0; info->pri_enabled = 0;
iopf_queue_remove_device(iommu->iopf_queue, dev);
/*
* With PRI disabled and outstanding PRQs drained, removing device
* from iopf queue should never fail.
*/
WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
return 0; return 0;
} }
......
...@@ -448,41 +448,60 @@ EXPORT_SYMBOL_GPL(iopf_queue_add_device); ...@@ -448,41 +448,60 @@ EXPORT_SYMBOL_GPL(iopf_queue_add_device);
* @queue: IOPF queue * @queue: IOPF queue
* @dev: device to remove * @dev: device to remove
* *
* Caller makes sure that no more faults are reported for this device. * Removing a device from an iopf_queue. It's recommended to follow these
* steps when removing a device:
* *
* Return: 0 on success and <0 on error. * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
* and flush any hardware page request queues. This should be done before
* calling into this helper.
* - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
* page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
* not retry. This helper function handles this.
* - Disable PRI on the device: After calling this helper, the caller could
* then disable PRI on the device.
*
* Calling iopf_queue_remove_device() essentially disassociates the device.
* The fault_param might still exist, but iommu_page_response() will do
* nothing. The device fault parameter reference count has been properly
* passed from iommu_report_device_fault() to the fault handling work, and
* will eventually be released after iommu_page_response().
*/ */
int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{ {
int ret = 0;
struct iopf_fault *iopf, *next; struct iopf_fault *iopf, *next;
struct iommu_page_response resp;
struct dev_iommu *param = dev->iommu; struct dev_iommu *param = dev->iommu;
struct iommu_fault_param *fault_param; struct iommu_fault_param *fault_param;
const struct iommu_ops *ops = dev_iommu_ops(dev);
mutex_lock(&queue->lock); mutex_lock(&queue->lock);
mutex_lock(&param->lock); mutex_lock(&param->lock);
fault_param = rcu_dereference_check(param->fault_param, fault_param = rcu_dereference_check(param->fault_param,
lockdep_is_held(&param->lock)); lockdep_is_held(&param->lock));
if (!fault_param) {
ret = -ENODEV;
goto unlock;
}
if (fault_param->queue != queue) { if (WARN_ON(!fault_param || fault_param->queue != queue))
ret = -EINVAL;
goto unlock; goto unlock;
}
if (!list_empty(&fault_param->faults)) { mutex_lock(&fault_param->lock);
ret = -EBUSY; list_for_each_entry_safe(iopf, next, &fault_param->partial, list)
goto unlock; kfree(iopf);
}
list_del(&fault_param->queue_list); list_for_each_entry_safe(iopf, next, &fault_param->faults, list) {
memset(&resp, 0, sizeof(struct iommu_page_response));
resp.pasid = iopf->fault.prm.pasid;
resp.grpid = iopf->fault.prm.grpid;
resp.code = IOMMU_PAGE_RESP_INVALID;
/* Just in case some faults are still stuck */ if (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)
list_for_each_entry_safe(iopf, next, &fault_param->partial, list) resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
ops->page_response(dev, iopf, &resp);
list_del(&iopf->list);
kfree(iopf); kfree(iopf);
}
mutex_unlock(&fault_param->lock);
list_del(&fault_param->queue_list);
/* dec the ref owned by iopf_queue_add_device() */ /* dec the ref owned by iopf_queue_add_device() */
rcu_assign_pointer(param->fault_param, NULL); rcu_assign_pointer(param->fault_param, NULL);
...@@ -490,8 +509,6 @@ int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) ...@@ -490,8 +509,6 @@ int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
unlock: unlock:
mutex_unlock(&param->lock); mutex_unlock(&param->lock);
mutex_unlock(&queue->lock); mutex_unlock(&queue->lock);
return ret;
} }
EXPORT_SYMBOL_GPL(iopf_queue_remove_device); EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
......
...@@ -1542,7 +1542,7 @@ iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm) ...@@ -1542,7 +1542,7 @@ iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
#ifdef CONFIG_IOMMU_IOPF #ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev); int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev); void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev); int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name); struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue); void iopf_queue_free(struct iopf_queue *queue);
...@@ -1558,10 +1558,9 @@ iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) ...@@ -1558,10 +1558,9 @@ iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
return -ENODEV; return -ENODEV;
} }
static inline int static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{ {
return -ENODEV;
} }
static inline int iopf_queue_flush_dev(struct device *dev) static inline int iopf_queue_flush_dev(struct device *dev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment