Commit 4bb4211e authored by Lu Baolu, committed by Joerg Roedel

iommu: Per-domain I/O page fault handling

Tweak the I/O page fault handling framework to route the page faults to
the domain and call the page fault handler retrieved from the domain.
This makes it possible for the I/O page fault handling framework to serve
more usage scenarios, as long as they have an IOMMU domain and install a
page fault handler in it. Some unused functions are also removed to avoid
dead code.
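
For context, a minimal sketch of how a domain owner might install such a
per-domain handler. The iopf_handler and fault_data fields are the ones
referenced in the diff below; the example_*() names and the handler body
are hypothetical and not part of this patch:

    /*
     * Minimal sketch, assuming an SVA-style domain owner that stores an
     * mm_struct in fault_data. Only the iopf_handler/fault_data fields
     * and the handler signature come from the code touched by this patch.
     */
    static enum iommu_page_response_code
    example_sva_iopf_handler(struct iommu_fault *fault, void *data)
    {
            struct mm_struct *mm = data;    /* whatever the owner put in fault_data */

            if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
                    return IOMMU_PAGE_RESP_INVALID;

            /* Resolve fault->prm.addr against mm, e.g. via handle_mm_fault(). */
            return IOMMU_PAGE_RESP_SUCCESS;
    }

    static void example_install_iopf_handler(struct iommu_domain *domain,
                                             struct mm_struct *mm)
    {
            domain->iopf_handler = example_sva_iopf_handler;
            domain->fault_data = mm;
    }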

iommu_get_domain_for_dev_pasid(), which retrieves the attached domain for
a {device, PASID} pair, is used here; the page fault handling framework
knows the {device, PASID} reported by the iommu driver. We have a
guarantee that the SVA domain doesn't go away during IOPF handling,
because unbind() won't free the domain until all pending page requests
have been flushed from the pipeline. Drivers either call
iopf_queue_flush_dev() explicitly, or, in the stall case, the device
driver is required to flush all DMAs, including stalled transactions,
before calling unbind().
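
A rough sketch of the ordering this guarantee relies on; the function below
is illustrative only (only iopf_queue_flush_dev() is an existing helper):

    /*
     * Illustrative only: whatever path tears down the {device, PASID}
     * binding must drain outstanding page requests before the SVA domain
     * is freed, so iopf_handler() never sees a stale domain.
     */
    static void example_unbind_dev_pasid(struct device *dev, ioasid_t pasid)
    {
            /*
             * 1. Stop the device and IOMMU from generating new page
             *    requests for this PASID (hardware-specific, omitted).
             */

            /* 2. Wait until every pending fault for this device is responded to. */
            iopf_queue_flush_dev(dev);

            /* 3. Only after this point may the SVA domain be freed. */
    }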

This also renames iopf_handle_group() to iopf_handler() to avoid
confusion.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Tested-by: Tony Zhu <tony.zhu@intel.com>
Link: https://lore.kernel.org/r/20221031005917.45690-13-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 8cc93159
drivers/iommu/io-pgfault.c

@@ -69,69 +69,18 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
 	return iommu_page_response(dev, &resp);
 }
 
-static enum iommu_page_response_code
-iopf_handle_single(struct iopf_fault *iopf)
-{
-	vm_fault_t ret;
-	struct mm_struct *mm;
-	struct vm_area_struct *vma;
-	unsigned int access_flags = 0;
-	unsigned int fault_flags = FAULT_FLAG_REMOTE;
-	struct iommu_fault_page_request *prm = &iopf->fault.prm;
-	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
-
-	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
-		return status;
-
-	mm = iommu_sva_find(prm->pasid);
-	if (IS_ERR_OR_NULL(mm))
-		return status;
-
-	mmap_read_lock(mm);
-
-	vma = find_extend_vma(mm, prm->addr);
-	if (!vma)
-		/* Unmapped area */
-		goto out_put_mm;
-
-	if (prm->perm & IOMMU_FAULT_PERM_READ)
-		access_flags |= VM_READ;
-
-	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
-		access_flags |= VM_WRITE;
-		fault_flags |= FAULT_FLAG_WRITE;
-	}
-
-	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
-		access_flags |= VM_EXEC;
-		fault_flags |= FAULT_FLAG_INSTRUCTION;
-	}
-
-	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
-		fault_flags |= FAULT_FLAG_USER;
-
-	if (access_flags & ~vma->vm_flags)
-		/* Access fault */
-		goto out_put_mm;
-
-	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
-	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
-		IOMMU_PAGE_RESP_SUCCESS;
-
-out_put_mm:
-	mmap_read_unlock(mm);
-	mmput(mm);
-
-	return status;
-}
-
-static void iopf_handle_group(struct work_struct *work)
+static void iopf_handler(struct work_struct *work)
 {
 	struct iopf_group *group;
+	struct iommu_domain *domain;
 	struct iopf_fault *iopf, *next;
 	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 
 	group = container_of(work, struct iopf_group, work);
+	domain = iommu_get_domain_for_dev_pasid(group->dev,
+				group->last_fault.fault.prm.pasid, 0);
+	if (!domain || !domain->iopf_handler)
+		status = IOMMU_PAGE_RESP_INVALID;
 
 	list_for_each_entry_safe(iopf, next, &group->faults, list) {
 		/*
@@ -139,7 +88,8 @@ static void iopf_handle_group(struct work_struct *work)
 		 * faults in the group if there is an error.
 		 */
 		if (status == IOMMU_PAGE_RESP_SUCCESS)
-			status = iopf_handle_single(iopf);
+			status = domain->iopf_handler(&iopf->fault,
+						      domain->fault_data);
 
 		if (!(iopf->fault.prm.flags &
 		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
@@ -242,7 +192,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
 	group->last_fault.fault = *fault;
 	INIT_LIST_HEAD(&group->faults);
 	list_add(&group->last_fault.list, &group->faults);
-	INIT_WORK(&group->work, iopf_handle_group);
+	INIT_WORK(&group->work, iopf_handler);
 
 	/* See if we have partial faults for this group */
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
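
For reference, a rough sketch of how a recoverable fault reaches
iopf_handler() in the first place. The interrupt handler and the device
lookup are hypothetical; iommu_report_device_fault() and iommu_queue_iopf()
are the existing entry points:

    /*
     * Illustrative flow, not part of this patch: a vendor IOMMU driver's
     * page-request interrupt handler reports the fault to the core, which
     * queues it through iommu_queue_iopf(); the workqueue later runs
     * iopf_handler(), which now routes it to domain->iopf_handler().
     */
    static irqreturn_t example_prq_irq_handler(int irq, void *cookie)
    {
            struct device *dev = cookie;    /* faulting endpoint (hypothetical lookup) */
            struct iommu_fault_event evt = {
                    .fault = {
                            .type = IOMMU_FAULT_PAGE_REQ,
                            /* .prm filled in from the hardware page-request entry */
                    },
            };

            iommu_report_device_fault(dev, &evt);

            return IRQ_HANDLED;
    }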