Commit a5f74ec7 authored by Souptick Joarder, committed by Rob Clark

gpu: drm: msm: Change return type to vm_fault_t

Use the new return type vm_fault_t for the fault handler. For
now this merely documents that the function returns a VM_FAULT_*
value rather than an errno; once all instances are converted,
vm_fault_t will become a distinct type.

Ref: commit 1c8f4220 ("mm: change return type to vm_fault_t")
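
At this point in the series, vm_fault_t is still a plain alias, so the
conversion compiles either way; the __bitwise annotation that lets sparse
flag errno/VM_FAULT_* mixups only lands once every handler is converted.
Roughly, from include/linux/mm_types.h (paraphrased, not verbatim):

    /* Introduced as a plain alias; later hardened to
     * "typedef unsigned int __bitwise vm_fault_t;" so sparse can catch
     * code that mixes errnos with VM_FAULT_* codes at build time.
     */
    typedef int vm_fault_t;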

Previously, vm_insert_mixed() returned an errno, which each
driver then mapped to a VM_FAULT_* code by hand. The new function
vmf_insert_mixed() removes that boilerplate by returning a
VM_FAULT_* code directly.
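
The errno-to-VM_FAULT_* mapping does not disappear, it moves out of each
driver and into the core helper. A sketch of the mm-side helper
(paraphrased from mm/memory.c, not verbatim):

    vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
            unsigned long addr, pfn_t pfn)
    {
            int err = vm_insert_mixed(vma, addr, pfn);

            if (err == -ENOMEM)
                    return VM_FAULT_OOM;
            if (err < 0 && err != -EBUSY)
                    return VM_FAULT_SIGBUS;
            /* 0 or -EBUSY: the PTE is in place, possibly via a racing thread */
            return VM_FAULT_NOPAGE;
    }

This is the same mapping the driver's switch statement below performed by
hand on the vm_insert_mixed() result.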

vmf_error() is a newly introduced inline function in 4.17-rc6.
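
Its definition is short enough to quote; roughly, from include/linux/mm.h
(paraphrased):

    static inline vm_fault_t vmf_error(int err)
    {
            if (err == -ENOMEM)
                    return VM_FAULT_OOM;
            return VM_FAULT_SIGBUS;
    }

Note that the interrupted-lock errnos (-ERESTARTSYS/-EINTR), which also
used to fall through the driver's switch to VM_FAULT_NOPAGE, are now
handled explicitly at the mutex_lock_interruptible() call site.
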
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 3e91a8b5
drivers/gpu/drm/msm/msm_drv.h
@@ -263,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -227,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
-	int ret;
+	int err;
+	vm_fault_t ret;
 
 	/*
 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	ret = mutex_lock_interruptible(&msm_obj->lock);
-	if (ret)
+	err = mutex_lock_interruptible(&msm_obj->lock);
+	if (err) {
+		ret = VM_FAULT_NOPAGE;
 		goto out;
+	}
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 		mutex_unlock(&msm_obj->lock);
@@ -245,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
 	/* make sure we have pages attached now */
 	pages = get_pages(obj);
 	if (IS_ERR(pages)) {
-		ret = PTR_ERR(pages);
+		ret = vmf_error(PTR_ERR(pages));
 		goto out_unlock;
 	}
@@ -257,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
 	mutex_unlock(&msm_obj->lock);
 out:
-	switch (ret) {
-	case -EAGAIN:
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	default:
-		return VM_FAULT_SIGBUS;
-	}
+	return ret;
 }
 
 /** get mmap offset */