Commit 8a2421bd authored by Chris Wilson

drm/i915: Wait upon userptr get-user-pages within execbuffer

This simply hides the EAGAIN caused by userptr when userspace causes
resource contention. However, it is quite beneficial with highly
contended userptr users as we avoid repeating the setup costs and
kernel-user context switches.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
parent 616d9cee
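
The EAGAIN being hidden here is one that userspace previously had to absorb itself. Below is a minimal sketch of the userspace side this change short-circuits, assuming a libdrm-style retry loop; submit_execbuf is a hypothetical helper, while DRM_IOCTL_I915_GEM_EXECBUFFER2 is the real ioctl:

/* Hypothetical helper, not from this patch: before this change, an
 * execbuffer against a userptr object whose pages were still being
 * acquired failed with EAGAIN, and userspace re-entered the kernel
 * and repeated the whole execbuf setup on every retry.
 */
#include <errno.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int submit_execbuf(int fd, struct drm_i915_gem_execbuffer2 *eb)
{
	int err;

	do {
		err = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb);
	} while (err == -1 && (errno == EINTR || errno == EAGAIN));

	return err == -1 ? -errno : 0;
}
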
@@ -579,6 +579,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
 	intel_uc_fini_hw(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_context_fini(dev_priv);
+	i915_gem_cleanup_userptr(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	i915_gem_drain_freed_objects(dev_priv);

@@ -1453,6 +1453,13 @@ struct i915_gem_mm {
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
 
+	/**
+	 * Workqueue to fault in userptr pages, flushed by the execbuf
+	 * when required but otherwise left to userspace to try again
+	 * on EAGAIN.
+	 */
+	struct workqueue_struct *userptr_wq;
+
 	u64 unordered_timeline;
 
 	/* the indicator for dispatch video commands on two BSD rings */

@@ -3228,7 +3235,8 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
+int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
+void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file);
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,

@@ -4804,7 +4804,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	i915_gem_init_userptr(dev_priv);
+	ret = i915_gem_init_userptr(dev_priv);
+	if (ret)
+		goto out_unlock;
 
 	ret = i915_gem_init_ggtt(dev_priv);
 	if (ret)

@@ -1499,6 +1499,9 @@ static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
 		goto out;
 	}
 
+	/* A frequent cause for EAGAIN are currently unavailable client pages */
+	flush_workqueue(eb->i915->mm.userptr_wq);
+
 	err = i915_mutex_lock_interruptible(dev);
 	if (err) {
 		mutex_lock(&dev->struct_mutex);

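flush_workqueue() returns only once every work item queued on that workqueue before the call has finished, which is why a dedicated queue is worth having: flushing the system workqueue would also wait on unrelated work. A self-contained sketch of this queue/flush pattern, with the demo_* names invented for illustration:

/* Illustrative module, not part of the patch: flush_workqueue()
 * blocks until previously queued work has run, mirroring how the
 * execbuf slow path waits for pending get-user-pages workers
 * without having to track each one individually.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_acquire(struct work_struct *work)
{
	pr_info("demo: pages acquired\n");
}
static DECLARE_WORK(demo_work, demo_acquire);

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo-acquire", WQ_HIGHPRI, 0);
	if (!demo_wq)
		return -ENOMEM;

	queue_work(demo_wq, &demo_work);
	flush_workqueue(demo_wq);	/* returns only after demo_acquire() ran */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
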
@@ -378,7 +378,7 @@ __i915_mm_struct_free(struct kref *kref)
 	mutex_unlock(&mm->i915->mm_lock);
 
 	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
-	schedule_work(&mm->work);
+	queue_work(mm->i915->mm.userptr_wq, &mm->work);
 }
 
 static void

@@ -598,7 +598,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 	get_task_struct(work->task);
 
 	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
-	schedule_work(&work->work);
+	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
 
 	return ERR_PTR(-EAGAIN);
 }

@@ -830,8 +830,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	return 0;
 }
 
-void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
+int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 {
 	mutex_init(&dev_priv->mm_lock);
 	hash_init(dev_priv->mm_structs);
+
+	dev_priv->mm.userptr_wq =
+		alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
+	if (!dev_priv->mm.userptr_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
+{
+	destroy_workqueue(dev_priv->mm.userptr_wq);
 }
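
i915_gem_init_userptr() can now fail, so callers have to unwind, and destroy_workqueue() drains any work still queued, so teardown is safe even with a free worker in flight. A sketch of the resulting init/cleanup pairing under error unwind, with the example_* names invented for illustration (the real patch unwinds via the goto out_unlock shown in i915_gem_init above):

/* Hypothetical caller, not from this patch: acquire the userptr
 * workqueue first, then unwind through i915_gem_cleanup_userptr()
 * if a later initialisation step fails.
 */
static int example_gem_init(struct drm_i915_private *i915)
{
	int ret;

	ret = i915_gem_init_userptr(i915);
	if (ret)
		return ret;

	ret = example_init_hw(i915);	/* stand-in for later init steps */
	if (ret)
		goto err_userptr;

	return 0;

err_userptr:
	i915_gem_cleanup_userptr(i915);
	return ret;
}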