Commit 6951e589 authored by Chris Wilson

drm/i915: Move GEM object domain management from struct_mutex to local

Use the per-object local lock to control the cache domain of the
individual GEM objects, not struct_mutex. This is a huge leap forward
for us in terms of object-level synchronisation; execbuffers are
coordinated using the ww_mutex and pread/pwrite is finally fully
serialised again.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-10-chris@chris-wilson.co.uk
parent 37d63f8f
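
For orientation, a minimal sketch of the calling convention this patch moves to. This is illustrative code, not part of the commit, and the function name example_flush_to_gtt() is made up; it only uses helpers that appear in the hunks below (i915_gem_object_lock_interruptible(), i915_gem_object_unlock(), i915_gem_object_set_to_gtt_domain()), which now replace taking dev->struct_mutex around cache-domain changes.

/* Sketch only: assumes the i915 headers ("i915_drv.h", "i915_gem_object.h"). */
static int example_flush_to_gtt(struct drm_i915_gem_object *obj)
{
	int err;

	/* Per-object reservation lock instead of dev->struct_mutex */
	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		return err;

	/* The set-domain helpers now assert assert_object_held(obj) */
	err = i915_gem_object_set_to_gtt_domain(obj, true);

	i915_gem_object_unlock(obj);
	return err;
}
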
...@@ -93,6 +93,7 @@ gem-y += \ ...@@ -93,6 +93,7 @@ gem-y += \
gem/i915_gem_dmabuf.o \ gem/i915_gem_dmabuf.o \
gem/i915_gem_domain.o \ gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \ gem/i915_gem_execbuffer.o \
gem/i915_gem_fence.o \
gem/i915_gem_internal.o \ gem/i915_gem_internal.o \
gem/i915_gem_object.o \ gem/i915_gem_object.o \
gem/i915_gem_mman.o \ gem/i915_gem_mman.o \
......
...@@ -95,6 +95,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -95,6 +95,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
{ {
struct clflush *clflush; struct clflush *clflush;
assert_object_held(obj);
/* /*
* Stolen memory is always coherent with the GPU as it is explicitly * Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent. * marked as wc by the system, or the system is cache-coherent.
...@@ -144,9 +146,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -144,9 +146,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
true, I915_FENCE_TIMEOUT, true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP); I915_FENCE_GFP);
reservation_object_lock(obj->resv, NULL);
reservation_object_add_excl_fence(obj->resv, &clflush->dma); reservation_object_add_excl_fence(obj->resv, &clflush->dma);
reservation_object_unlock(obj->resv);
i915_sw_fence_commit(&clflush->wait); i915_sw_fence_commit(&clflush->wait);
} else if (obj->mm.pages) { } else if (obj->mm.pages) {
......
...@@ -151,7 +151,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * ...@@ -151,7 +151,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{ {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
int err; int err;
...@@ -159,12 +158,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire ...@@ -159,12 +158,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
if (err) if (err)
return err; return err;
err = i915_mutex_lock_interruptible(dev); err = i915_gem_object_lock_interruptible(obj);
if (err) if (err)
goto out; goto out;
err = i915_gem_object_set_to_cpu_domain(obj, write); err = i915_gem_object_set_to_cpu_domain(obj, write);
mutex_unlock(&dev->struct_mutex); i915_gem_object_unlock(obj);
out: out:
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
...@@ -174,19 +173,18 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire ...@@ -174,19 +173,18 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{ {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int err; int err;
err = i915_gem_object_pin_pages(obj); err = i915_gem_object_pin_pages(obj);
if (err) if (err)
return err; return err;
err = i915_mutex_lock_interruptible(dev); err = i915_gem_object_lock_interruptible(obj);
if (err) if (err)
goto out; goto out;
err = i915_gem_object_set_to_gtt_domain(obj, false); err = i915_gem_object_set_to_gtt_domain(obj, false);
mutex_unlock(&dev->struct_mutex); i915_gem_object_unlock(obj);
out: out:
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
......
...@@ -29,9 +29,9 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) ...@@ -29,9 +29,9 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
if (!READ_ONCE(obj->pin_global)) if (!READ_ONCE(obj->pin_global))
return; return;
mutex_lock(&obj->base.dev->struct_mutex); i915_gem_object_lock(obj);
__i915_gem_object_flush_for_display(obj); __i915_gem_object_flush_for_display(obj);
mutex_unlock(&obj->base.dev->struct_mutex); i915_gem_object_unlock(obj);
} }
/** /**
...@@ -47,11 +47,10 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -47,11 +47,10 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{ {
int ret; int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex); assert_object_held(obj);
ret = i915_gem_object_wait(obj, ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0), (write ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
...@@ -109,11 +108,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -109,11 +108,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{ {
int ret; int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex); assert_object_held(obj);
ret = i915_gem_object_wait(obj, ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0), (write ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
...@@ -179,7 +177,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -179,7 +177,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
struct i915_vma *vma; struct i915_vma *vma;
int ret; int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex); assert_object_held(obj);
if (obj->cache_level == cache_level) if (obj->cache_level == cache_level)
return 0; return 0;
...@@ -228,7 +226,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -228,7 +226,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
*/ */
ret = i915_gem_object_wait(obj, ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL, I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
...@@ -372,12 +369,16 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, ...@@ -372,12 +369,16 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret) if (ret)
goto out; goto out;
ret = i915_mutex_lock_interruptible(dev); ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret) if (ret)
goto out; goto out;
ret = i915_gem_object_set_cache_level(obj, level); ret = i915_gem_object_lock_interruptible(obj);
mutex_unlock(&dev->struct_mutex); if (ret == 0) {
ret = i915_gem_object_set_cache_level(obj, level);
i915_gem_object_unlock(obj);
}
mutex_unlock(&i915->drm.struct_mutex);
out: out:
i915_gem_object_put(obj); i915_gem_object_put(obj);
...@@ -399,7 +400,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -399,7 +400,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_vma *vma; struct i915_vma *vma;
int ret; int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex); assert_object_held(obj);
/* Mark the global pin early so that we account for the /* Mark the global pin early so that we account for the
* display coherency whilst setting up the cache domains. * display coherency whilst setting up the cache domains.
...@@ -484,16 +485,18 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) ...@@ -484,16 +485,18 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
void void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); struct drm_i915_gem_object *obj = vma->obj;
assert_object_held(obj);
if (WARN_ON(vma->obj->pin_global == 0)) if (WARN_ON(obj->pin_global == 0))
return; return;
if (--vma->obj->pin_global == 0) if (--obj->pin_global == 0)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT; vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */ /* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(vma->obj); i915_gem_object_bump_inactive_ggtt(obj);
i915_vma_unpin(vma); i915_vma_unpin(vma);
} }
...@@ -511,11 +514,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -511,11 +514,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{ {
int ret; int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex); assert_object_held(obj);
ret = i915_gem_object_wait(obj, ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0), (write ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
...@@ -637,7 +639,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ...@@ -637,7 +639,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err) if (err)
goto out; goto out;
err = i915_mutex_lock_interruptible(dev); err = i915_gem_object_lock_interruptible(obj);
if (err) if (err)
goto out_unpin; goto out_unpin;
...@@ -651,7 +653,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ...@@ -651,7 +653,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
/* And bump the LRU for this access */ /* And bump the LRU for this access */
i915_gem_object_bump_inactive_ggtt(obj); i915_gem_object_bump_inactive_ggtt(obj);
mutex_unlock(&dev->struct_mutex); i915_gem_object_unlock(obj);
if (write_domain != 0) if (write_domain != 0)
intel_fb_obj_invalidate(obj, intel_fb_obj_invalidate(obj,
...@@ -674,22 +676,23 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, ...@@ -674,22 +676,23 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
{ {
int ret; int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
*needs_clflush = 0; *needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj)) if (!i915_gem_object_has_struct_page(obj))
return -ENODEV; return -ENODEV;
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
return ret;
ret = i915_gem_object_wait(obj, ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_INTERRUPTIBLE,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
return ret; goto err_unlock;
ret = i915_gem_object_pin_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ret; goto err_unlock;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) { !static_cpu_has(X86_FEATURE_CLFLUSH)) {
...@@ -717,6 +720,8 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, ...@@ -717,6 +720,8 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
err_unpin: err_unpin:
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
err_unlock:
i915_gem_object_unlock(obj);
return ret; return ret;
} }
...@@ -725,23 +730,24 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, ...@@ -725,23 +730,24 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
{ {
int ret; int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
*needs_clflush = 0; *needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj)) if (!i915_gem_object_has_struct_page(obj))
return -ENODEV; return -ENODEV;
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
return ret;
ret = i915_gem_object_wait(obj, ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL, I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
return ret; goto err_unlock;
ret = i915_gem_object_pin_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ret; goto err_unlock;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) { !static_cpu_has(X86_FEATURE_CLFLUSH)) {
...@@ -778,5 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, ...@@ -778,5 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
err_unpin: err_unpin:
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
err_unlock:
i915_gem_object_unlock(obj);
return ret; return ret;
} }
...@@ -1075,7 +1075,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, ...@@ -1075,7 +1075,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(cache, obj)) if (use_cpu_reloc(cache, obj))
return NULL; return NULL;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true); err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) if (err)
return ERR_PTR(err); return ERR_PTR(err);
...@@ -1164,6 +1166,26 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) ...@@ -1164,6 +1166,26 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
*addr = value; *addr = value;
} }
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
int err;
i915_vma_lock(vma);
if (obj->cache_dirty & ~obj->cache_coherent)
i915_gem_clflush_object(obj, 0);
obj->write_domain = 0;
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
return err;
}
static int __reloc_gpu_alloc(struct i915_execbuffer *eb, static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
struct i915_vma *vma, struct i915_vma *vma,
unsigned int len) unsigned int len)
...@@ -1175,15 +1197,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, ...@@ -1175,15 +1197,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd; u32 *cmd;
int err; int err;
if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
obj = vma->obj;
if (obj->cache_dirty & ~obj->cache_coherent)
i915_gem_clflush_object(obj, 0);
obj->write_domain = 0;
}
GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
...@@ -1212,7 +1225,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, ...@@ -1212,7 +1225,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unpin; goto err_unpin;
} }
err = i915_request_await_object(rq, vma->obj, true); err = reloc_move_to_gpu(rq, vma);
if (err) if (err)
goto err_request; goto err_request;
...@@ -1220,14 +1233,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, ...@@ -1220,14 +1233,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
batch->node.start, PAGE_SIZE, batch->node.start, PAGE_SIZE,
cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
if (err) if (err)
goto err_request; goto skip_request;
i915_vma_lock(batch);
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
err = i915_vma_move_to_active(batch, rq, 0); err = i915_vma_move_to_active(batch, rq, 0);
if (err) i915_vma_unlock(batch);
goto skip_request;
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
if (err) if (err)
goto skip_request; goto skip_request;
...@@ -1837,24 +1848,59 @@ static int eb_relocate(struct i915_execbuffer *eb) ...@@ -1837,24 +1848,59 @@ static int eb_relocate(struct i915_execbuffer *eb)
static int eb_move_to_gpu(struct i915_execbuffer *eb) static int eb_move_to_gpu(struct i915_execbuffer *eb)
{ {
const unsigned int count = eb->buffer_count; const unsigned int count = eb->buffer_count;
struct ww_acquire_ctx acquire;
unsigned int i; unsigned int i;
int err; int err = 0;
ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct i915_vma *vma = eb->vma[i];
err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
if (!err)
continue;
GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
if (err == -EDEADLK) {
GEM_BUG_ON(i == 0);
do {
int j = i - 1;
ww_mutex_unlock(&eb->vma[j]->resv->lock);
swap(eb->flags[i], eb->flags[j]);
swap(eb->vma[i], eb->vma[j]);
eb->vma[i]->exec_flags = &eb->flags[i];
} while (--i);
GEM_BUG_ON(vma != eb->vma[0]);
vma->exec_flags = &eb->flags[0];
err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
&acquire);
}
if (err)
break;
}
ww_acquire_done(&acquire);
while (i--) {
unsigned int flags = eb->flags[i]; unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i]; struct i915_vma *vma = eb->vma[i];
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
assert_vma_held(vma);
if (flags & EXEC_OBJECT_CAPTURE) { if (flags & EXEC_OBJECT_CAPTURE) {
struct i915_capture_list *capture; struct i915_capture_list *capture;
capture = kmalloc(sizeof(*capture), GFP_KERNEL); capture = kmalloc(sizeof(*capture), GFP_KERNEL);
if (unlikely(!capture)) if (capture) {
return -ENOMEM; capture->next = eb->request->capture_list;
capture->vma = vma;
capture->next = eb->request->capture_list; eb->request->capture_list = capture;
capture->vma = eb->vma[i]; }
eb->request->capture_list = capture;
} }
/* /*
...@@ -1874,24 +1920,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) ...@@ -1874,24 +1920,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
flags &= ~EXEC_OBJECT_ASYNC; flags &= ~EXEC_OBJECT_ASYNC;
} }
if (flags & EXEC_OBJECT_ASYNC) if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
continue; err = i915_request_await_object
(eb->request, obj, flags & EXEC_OBJECT_WRITE);
err = i915_request_await_object }
(eb->request, obj, flags & EXEC_OBJECT_WRITE);
if (err)
return err;
}
for (i = 0; i < count; i++) { if (err == 0)
unsigned int flags = eb->flags[i]; err = i915_vma_move_to_active(vma, eb->request, flags);
struct i915_vma *vma = eb->vma[i];
err = i915_vma_move_to_active(vma, eb->request, flags); i915_vma_unlock(vma);
if (unlikely(err)) {
i915_request_skip(eb->request, err);
return err;
}
__eb_unreserve_vma(vma, flags); __eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL; vma->exec_flags = NULL;
...@@ -1899,12 +1936,20 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) ...@@ -1899,12 +1936,20 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (unlikely(flags & __EXEC_OBJECT_HAS_REF)) if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
i915_vma_put(vma); i915_vma_put(vma);
} }
ww_acquire_fini(&acquire);
if (unlikely(err))
goto err_skip;
eb->exec = NULL; eb->exec = NULL;
/* Unconditionally flush any chipset caches (for streaming writes). */ /* Unconditionally flush any chipset caches (for streaming writes). */
i915_gem_chipset_flush(eb->i915); i915_gem_chipset_flush(eb->i915);
return 0; return 0;
err_skip:
i915_request_skip(eb->request, err);
return err;
} }
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
......
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_gem_object.h"
struct stub_fence {
struct dma_fence dma;
struct i915_sw_fence chain;
};
static int __i915_sw_fence_call
stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
switch (state) {
case FENCE_COMPLETE:
dma_fence_signal(&stub->dma);
break;
case FENCE_FREE:
dma_fence_put(&stub->dma);
break;
}
return NOTIFY_DONE;
}
static const char *stub_driver_name(struct dma_fence *fence)
{
return DRIVER_NAME;
}
static const char *stub_timeline_name(struct dma_fence *fence)
{
return "object";
}
static void stub_release(struct dma_fence *fence)
{
struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
i915_sw_fence_fini(&stub->chain);
BUILD_BUG_ON(offsetof(typeof(*stub), dma));
dma_fence_free(&stub->dma);
}
static const struct dma_fence_ops stub_fence_ops = {
.get_driver_name = stub_driver_name,
.get_timeline_name = stub_timeline_name,
.release = stub_release,
};
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
{
struct stub_fence *stub;
assert_object_held(obj);
stub = kmalloc(sizeof(*stub), GFP_KERNEL);
if (!stub)
return NULL;
i915_sw_fence_init(&stub->chain, stub_notify);
dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
to_i915(obj->base.dev)->mm.unordered_timeline,
0);
if (i915_sw_fence_await_reservation(&stub->chain,
obj->resv, NULL,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP) < 0)
goto err;
reservation_object_add_excl_fence(obj->resv, &stub->dma);
return &stub->dma;
err:
stub_release(&stub->dma);
return NULL;
}
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
struct dma_fence *fence)
{
struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
i915_sw_fence_commit(&stub->chain);
}
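
The stub fence above is how pread/pwrite stay serialised against the GPU once the object lock has been dropped: i915_gem_object_lock_fence() installs a stub exclusive fence in the object's reservation object, and i915_gem_object_unlock_fence() signals it when the CPU access is finished. A minimal usage sketch, mirroring the i915_gem_shmem_pread() hunk later in this commit (example_cpu_read() is a made-up name, not code from the patch):

static int example_cpu_read(struct drm_i915_gem_object *obj)
{
	unsigned int needs_clflush;
	struct dma_fence *fence;
	int err;

	/* Locks the object, waits for rendering and pins the pages */
	err = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (err)
		return err;

	/* Publish the stub exclusive fence, then unpin and drop the object lock */
	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* ... CPU access to the object's backing store goes here ... */

	/* Signal the stub fence so work queued behind it can proceed */
	i915_gem_object_unlock_fence(obj, fence);
	return 0;
}
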
...@@ -378,6 +378,8 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, ...@@ -378,6 +378,8 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma; struct i915_vma *vma;
assert_object_held(obj);
if (!(obj->write_domain & flush_domains)) if (!(obj->write_domain & flush_domains))
return; return;
......
...@@ -99,16 +99,29 @@ i915_gem_object_put(struct drm_i915_gem_object *obj) ...@@ -99,16 +99,29 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
__drm_gem_object_put(&obj->base); __drm_gem_object_put(&obj->base);
} }
#define assert_object_held(obj) reservation_object_assert_held((obj)->resv)
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{ {
reservation_object_lock(obj->resv, NULL); reservation_object_lock(obj->resv, NULL);
} }
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
return reservation_object_lock_interruptible(obj->resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{ {
reservation_object_unlock(obj->resv); reservation_object_unlock(obj->resv);
} }
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
struct dma_fence *fence);
static inline void static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{ {
...@@ -372,6 +385,7 @@ static inline void ...@@ -372,6 +385,7 @@ static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj) i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{ {
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
i915_gem_object_unlock(obj);
} }
static inline struct intel_engine_cs * static inline struct intel_engine_cs *
......
...@@ -187,12 +187,13 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) ...@@ -187,12 +187,13 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition. * machine in an unusable condition.
*/ */
mutex_lock(&i915->drm.struct_mutex);
for (phase = phases; *phase; phase++) { for (phase = phases; *phase; phase++) {
list_for_each_entry(obj, *phase, mm.link) list_for_each_entry(obj, *phase, mm.link) {
i915_gem_object_lock(obj);
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
i915_gem_object_unlock(obj);
}
} }
mutex_unlock(&i915->drm.struct_mutex);
intel_uc_sanitize(i915); intel_uc_sanitize(i915);
i915_gem_sanitize(i915); i915_gem_sanitize(i915);
......
...@@ -960,10 +960,6 @@ static int gpu_write(struct i915_vma *vma, ...@@ -960,10 +960,6 @@ static int gpu_write(struct i915_vma *vma,
GEM_BUG_ON(!intel_engine_can_store_dword(engine)); GEM_BUG_ON(!intel_engine_can_store_dword(engine));
err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
if (err)
return err;
batch = gpu_write_dw(vma, dword * sizeof(u32), value); batch = gpu_write_dw(vma, dword * sizeof(u32), value);
if (IS_ERR(batch)) if (IS_ERR(batch))
return PTR_ERR(batch); return PTR_ERR(batch);
...@@ -974,13 +970,19 @@ static int gpu_write(struct i915_vma *vma, ...@@ -974,13 +970,19 @@ static int gpu_write(struct i915_vma *vma,
goto err_batch; goto err_batch;
} }
i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0); err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err) if (err)
goto err_request; goto err_request;
i915_gem_object_set_active_reference(batch->obj); i915_gem_object_set_active_reference(batch->obj);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); i915_vma_lock(vma);
err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err) if (err)
goto err_request; goto err_request;
......
...@@ -78,7 +78,9 @@ static int gtt_set(struct drm_i915_gem_object *obj, ...@@ -78,7 +78,9 @@ static int gtt_set(struct drm_i915_gem_object *obj,
u32 __iomem *map; u32 __iomem *map;
int err; int err;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true); err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) if (err)
return err; return err;
...@@ -105,7 +107,9 @@ static int gtt_get(struct drm_i915_gem_object *obj, ...@@ -105,7 +107,9 @@ static int gtt_get(struct drm_i915_gem_object *obj,
u32 __iomem *map; u32 __iomem *map;
int err; int err;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false); err = i915_gem_object_set_to_gtt_domain(obj, false);
i915_gem_object_unlock(obj);
if (err) if (err)
return err; return err;
...@@ -131,7 +135,9 @@ static int wc_set(struct drm_i915_gem_object *obj, ...@@ -131,7 +135,9 @@ static int wc_set(struct drm_i915_gem_object *obj,
u32 *map; u32 *map;
int err; int err;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, true); err = i915_gem_object_set_to_wc_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) if (err)
return err; return err;
...@@ -152,7 +158,9 @@ static int wc_get(struct drm_i915_gem_object *obj, ...@@ -152,7 +158,9 @@ static int wc_get(struct drm_i915_gem_object *obj,
u32 *map; u32 *map;
int err; int err;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, false); err = i915_gem_object_set_to_wc_domain(obj, false);
i915_gem_object_unlock(obj);
if (err) if (err)
return err; return err;
...@@ -176,7 +184,9 @@ static int gpu_set(struct drm_i915_gem_object *obj, ...@@ -176,7 +184,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
u32 *cs; u32 *cs;
int err; int err;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true); err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) if (err)
return err; return err;
...@@ -215,7 +225,9 @@ static int gpu_set(struct drm_i915_gem_object *obj, ...@@ -215,7 +225,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
} }
intel_ring_advance(rq, cs); intel_ring_advance(rq, cs);
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
i915_vma_unpin(vma); i915_vma_unpin(vma);
i915_request_add(rq); i915_request_add(rq);
......
...@@ -209,7 +209,9 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) ...@@ -209,7 +209,9 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
i915_gem_object_flush_map(obj); i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj); i915_gem_object_unpin_map(obj);
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false); err = i915_gem_object_set_to_gtt_domain(obj, false);
i915_gem_object_unlock(obj);
if (err) if (err)
goto err; goto err;
...@@ -261,7 +263,9 @@ static int gpu_fill(struct drm_i915_gem_object *obj, ...@@ -261,7 +263,9 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) if (IS_ERR(vma))
return PTR_ERR(vma); return PTR_ERR(vma);
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false); err = i915_gem_object_set_to_gtt_domain(obj, false);
i915_gem_object_unlock(obj);
if (err) if (err)
return err; return err;
...@@ -302,11 +306,15 @@ static int gpu_fill(struct drm_i915_gem_object *obj, ...@@ -302,11 +306,15 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err) if (err)
goto err_request; goto err_request;
i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0); err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err) if (err)
goto skip_request; goto skip_request;
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
...@@ -754,7 +762,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, ...@@ -754,7 +762,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) if (IS_ERR(vma))
return PTR_ERR(vma); return PTR_ERR(vma);
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false); err = i915_gem_object_set_to_gtt_domain(obj, false);
i915_gem_object_unlock(obj);
if (err) if (err)
return err; return err;
...@@ -780,11 +790,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, ...@@ -780,11 +790,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err) if (err)
goto err_request; goto err_request;
i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0); err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err) if (err)
goto skip_request; goto skip_request;
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
...@@ -1345,7 +1359,9 @@ static int write_to_scratch(struct i915_gem_context *ctx, ...@@ -1345,7 +1359,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err) if (err)
goto err_request; goto err_request;
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, 0); err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
...@@ -1440,7 +1456,9 @@ static int read_from_scratch(struct i915_gem_context *ctx, ...@@ -1440,7 +1456,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err) if (err)
goto err_request; goto err_request;
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
...@@ -1449,7 +1467,9 @@ static int read_from_scratch(struct i915_gem_context *ctx, ...@@ -1449,7 +1467,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_request_add(rq); i915_request_add(rq);
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_cpu_domain(obj, false); err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err) if (err)
goto err; goto err;
......
...@@ -110,7 +110,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, ...@@ -110,7 +110,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(view.partial.size > nreal); GEM_BUG_ON(view.partial.size > nreal);
cond_resched(); cond_resched();
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true); err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) { if (err) {
pr_err("Failed to flush to GTT write domain; err=%d\n", pr_err("Failed to flush to GTT write domain; err=%d\n",
err); err);
...@@ -142,7 +144,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, ...@@ -142,7 +144,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size) if (offset >= obj->base.size)
continue; continue;
i915_gem_object_lock(obj);
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
i915_gem_object_unlock(obj);
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset); cpu = kmap(p) + offset_in_page(offset);
...@@ -344,7 +348,9 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) ...@@ -344,7 +348,9 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
return PTR_ERR(rq); return PTR_ERR(rq);
} }
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
i915_request_add(rq); i915_request_add(rq);
......
...@@ -46,9 +46,9 @@ static int mock_phys_object(void *arg) ...@@ -46,9 +46,9 @@ static int mock_phys_object(void *arg)
} }
/* Make the object dirty so that put_pages must do copy back the data */ /* Make the object dirty so that put_pages must do copy back the data */
mutex_lock(&i915->drm.struct_mutex); i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true); err = i915_gem_object_set_to_gtt_domain(obj, true);
mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unlock(obj);
if (err) { if (err) {
pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n", pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
err); err);
......
...@@ -117,7 +117,9 @@ static int move_to_active(struct i915_vma *vma, ...@@ -117,7 +117,9 @@ static int move_to_active(struct i915_vma *vma,
{ {
int err; int err;
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags); err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
if (err) if (err)
return err; return err;
...@@ -1252,7 +1254,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, ...@@ -1252,7 +1254,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
} }
} }
i915_vma_lock(arg.vma);
err = i915_vma_move_to_active(arg.vma, rq, flags); err = i915_vma_move_to_active(arg.vma, rq, flags);
i915_vma_unlock(arg.vma);
if (flags & EXEC_OBJECT_NEEDS_FENCE) if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_vma_unpin_fence(arg.vma); i915_vma_unpin_fence(arg.vma);
......
...@@ -1108,11 +1108,13 @@ static int smoke_submit(struct preempt_smoke *smoke, ...@@ -1108,11 +1108,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
} }
if (vma) { if (vma) {
i915_vma_lock(vma);
err = rq->engine->emit_bb_start(rq, err = rq->engine->emit_bb_start(rq,
vma->node.start, vma->node.start,
PAGE_SIZE, 0); PAGE_SIZE, 0);
if (!err) if (!err)
err = i915_vma_move_to_active(vma, rq, 0); err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);
} }
i915_request_add(rq); i915_request_add(rq);
......
...@@ -118,7 +118,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) ...@@ -118,7 +118,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_pin; goto err_pin;
} }
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err) if (err)
goto err_req; goto err_req;
...@@ -195,8 +197,10 @@ static int check_whitelist(struct i915_gem_context *ctx, ...@@ -195,8 +197,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
return PTR_ERR(results); return PTR_ERR(results);
err = 0; err = 0;
i915_gem_object_lock(results);
igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false); err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
if (i915_terminally_wedged(ctx->i915)) if (i915_terminally_wedged(ctx->i915))
err = -EIO; err = -EIO;
if (err) if (err)
...@@ -367,7 +371,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx) ...@@ -367,7 +371,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (err) if (err)
goto err_obj; goto err_obj;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, true); err = i915_gem_object_set_to_wc_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) if (err)
goto err_obj; goto err_obj;
......
...@@ -2844,7 +2844,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) ...@@ -2844,7 +2844,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto put_obj; goto put_obj;
} }
i915_gem_object_lock(obj);
ret = i915_gem_object_set_to_cpu_domain(obj, false); ret = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (ret) { if (ret) {
gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src; goto unmap_src;
......
...@@ -509,18 +509,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) ...@@ -509,18 +509,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
} }
ret = i915_gem_object_set_to_gtt_domain(bb->obj, ret = i915_gem_object_set_to_gtt_domain(bb->obj,
false); false);
if (ret) if (ret)
goto err; goto err;
i915_gem_object_finish_access(bb->obj);
bb->accessing = false;
ret = i915_vma_move_to_active(bb->vma, ret = i915_vma_move_to_active(bb->vma,
workload->req, workload->req,
0); 0);
if (ret) if (ret)
goto err; goto err;
i915_gem_object_finish_access(bb->obj);
bb->accessing = false;
} }
} }
return 0; return 0;
......
...@@ -1058,19 +1058,20 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, ...@@ -1058,19 +1058,20 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
void *dst, *src; void *dst, *src;
int ret; int ret;
ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush); ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
if (ret) {
dst = ERR_PTR(ret);
goto unpin_src;
}
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB); dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst)) if (IS_ERR(dst))
goto unpin_dst; return dst;
ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
if (ret) {
i915_gem_object_unpin_map(dst_obj);
return ERR_PTR(ret);
}
src = ERR_PTR(-ENODEV); src = ERR_PTR(-ENODEV);
if (src_needs_clflush && if (src_needs_clflush &&
...@@ -1116,13 +1117,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, ...@@ -1116,13 +1117,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
} }
} }
i915_gem_object_finish_access(src_obj);
/* dst_obj is returned with vmap pinned */ /* dst_obj is returned with vmap pinned */
*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER; *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
unpin_dst:
i915_gem_object_finish_access(dst_obj);
unpin_src:
i915_gem_object_finish_access(src_obj);
return dst; return dst;
} }
......
...@@ -104,19 +104,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) ...@@ -104,19 +104,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{ {
struct i915_vma *vma; struct i915_vma *vma;
LIST_HEAD(still_in_list); LIST_HEAD(still_in_list);
int ret; int ret = 0;
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held(&obj->base.dev->struct_mutex);
/* Closed vma are removed from the obj->vma_list - but they may
* still have an active binding on the object. To remove those we
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
return ret;
spin_lock(&obj->vma.lock); spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list, while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma, struct i915_vma,
...@@ -139,29 +130,17 @@ i915_gem_object_wait_fence(struct dma_fence *fence, ...@@ -139,29 +130,17 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
unsigned int flags, unsigned int flags,
long timeout) long timeout)
{ {
struct i915_request *rq;
BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return timeout; return timeout;
if (!dma_fence_is_i915(fence)) if (dma_fence_is_i915(fence))
return dma_fence_wait_timeout(fence, return i915_request_wait(to_request(fence), flags, timeout);
flags & I915_WAIT_INTERRUPTIBLE,
timeout);
rq = to_request(fence); return dma_fence_wait_timeout(fence,
if (i915_request_completed(rq)) flags & I915_WAIT_INTERRUPTIBLE,
goto out; timeout);
timeout = i915_request_wait(rq, flags, timeout);
out:
if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
i915_request_retire_upto(rq);
return timeout;
} }
static long static long
...@@ -487,21 +466,22 @@ static int ...@@ -487,21 +466,22 @@ static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj, i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args) struct drm_i915_gem_pread *args)
{ {
char __user *user_data;
u64 remain;
unsigned int needs_clflush; unsigned int needs_clflush;
unsigned int idx, offset; unsigned int idx, offset;
struct dma_fence *fence;
char __user *user_data;
u64 remain;
int ret; int ret;
ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
if (ret)
return ret;
ret = i915_gem_object_prepare_read(obj, &needs_clflush); ret = i915_gem_object_prepare_read(obj, &needs_clflush);
mutex_unlock(&obj->base.dev->struct_mutex);
if (ret) if (ret)
return ret; return ret;
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
if (!fence)
return -ENOMEM;
remain = args->size; remain = args->size;
user_data = u64_to_user_ptr(args->data_ptr); user_data = u64_to_user_ptr(args->data_ptr);
offset = offset_in_page(args->offset); offset = offset_in_page(args->offset);
...@@ -519,7 +499,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, ...@@ -519,7 +499,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
offset = 0; offset = 0;
} }
i915_gem_object_finish_access(obj); i915_gem_object_unlock_fence(obj, fence);
return ret; return ret;
} }
...@@ -555,8 +535,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, ...@@ -555,8 +535,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &i915->ggtt; struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
struct drm_mm_node node; struct drm_mm_node node;
struct i915_vma *vma; struct dma_fence *fence;
void __user *user_data; void __user *user_data;
struct i915_vma *vma;
u64 remain, offset; u64 remain, offset;
int ret; int ret;
...@@ -585,11 +566,24 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, ...@@ -585,11 +566,24 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!node.allocated); GEM_BUG_ON(!node.allocated);
} }
ret = i915_gem_object_set_to_gtt_domain(obj, false); mutex_unlock(&i915->drm.struct_mutex);
ret = i915_gem_object_lock_interruptible(obj);
if (ret) if (ret)
goto out_unpin; goto out_unpin;
mutex_unlock(&i915->drm.struct_mutex); ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
i915_gem_object_unlock(obj);
goto out_unpin;
}
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_unlock(obj);
if (!fence) {
ret = -ENOMEM;
goto out_unpin;
}
user_data = u64_to_user_ptr(args->data_ptr); user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size; remain = args->size;
...@@ -627,8 +621,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, ...@@ -627,8 +621,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
offset += page_length; offset += page_length;
} }
mutex_lock(&i915->drm.struct_mutex); i915_gem_object_unlock_fence(obj, fence);
out_unpin: out_unpin:
mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) { if (node.allocated) {
wmb(); wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
...@@ -739,6 +734,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -739,6 +734,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &i915->ggtt; struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
struct drm_mm_node node; struct drm_mm_node node;
struct dma_fence *fence;
struct i915_vma *vma; struct i915_vma *vma;
u64 remain, offset; u64 remain, offset;
void __user *user_data; void __user *user_data;
...@@ -786,11 +782,24 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -786,11 +782,24 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!node.allocated); GEM_BUG_ON(!node.allocated);
} }
ret = i915_gem_object_set_to_gtt_domain(obj, true); mutex_unlock(&i915->drm.struct_mutex);
ret = i915_gem_object_lock_interruptible(obj);
if (ret) if (ret)
goto out_unpin; goto out_unpin;
mutex_unlock(&i915->drm.struct_mutex); ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) {
i915_gem_object_unlock(obj);
goto out_unpin;
}
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_unlock(obj);
if (!fence) {
ret = -ENOMEM;
goto out_unpin;
}
intel_fb_obj_invalidate(obj, ORIGIN_CPU); intel_fb_obj_invalidate(obj, ORIGIN_CPU);
...@@ -835,8 +844,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -835,8 +844,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
} }
intel_fb_obj_flush(obj, ORIGIN_CPU); intel_fb_obj_flush(obj, ORIGIN_CPU);
mutex_lock(&i915->drm.struct_mutex); i915_gem_object_unlock_fence(obj, fence);
out_unpin: out_unpin:
mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) { if (node.allocated) {
wmb(); wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
...@@ -882,23 +892,23 @@ static int ...@@ -882,23 +892,23 @@ static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args) const struct drm_i915_gem_pwrite *args)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
void __user *user_data;
u64 remain;
unsigned int partial_cacheline_write; unsigned int partial_cacheline_write;
unsigned int needs_clflush; unsigned int needs_clflush;
unsigned int offset, idx; unsigned int offset, idx;
struct dma_fence *fence;
void __user *user_data;
u64 remain;
int ret; int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
ret = i915_gem_object_prepare_write(obj, &needs_clflush); ret = i915_gem_object_prepare_write(obj, &needs_clflush);
mutex_unlock(&i915->drm.struct_mutex);
if (ret) if (ret)
return ret; return ret;
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
if (!fence)
return -ENOMEM;
/* If we don't overwrite a cacheline completely we need to be /* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't * careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire patch. * overcomplicate things and flush the entire patch.
...@@ -926,7 +936,8 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, ...@@ -926,7 +936,8 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
} }
intel_fb_obj_flush(obj, ORIGIN_CPU); intel_fb_obj_flush(obj, ORIGIN_CPU);
i915_gem_object_finish_access(obj); i915_gem_object_unlock_fence(obj, fence);
return ret; return ret;
} }
...@@ -1805,7 +1816,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) ...@@ -1805,7 +1816,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err) if (err)
goto err_active; goto err_active;
i915_gem_object_lock(state->obj);
err = i915_gem_object_set_to_cpu_domain(state->obj, false); err = i915_gem_object_set_to_cpu_domain(state->obj, false);
i915_gem_object_unlock(state->obj);
if (err) if (err)
goto err_active; goto err_active;
...@@ -2256,12 +2269,13 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) ...@@ -2256,12 +2269,13 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND); i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
i915_gem_drain_freed_objects(i915); i915_gem_drain_freed_objects(i915);
mutex_lock(&i915->drm.struct_mutex);
for (phase = phases; *phase; phase++) { for (phase = phases; *phase; phase++) {
list_for_each_entry(obj, *phase, mm.link) list_for_each_entry(obj, *phase, mm.link) {
i915_gem_object_lock(obj);
WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
}
} }
mutex_unlock(&i915->drm.struct_mutex);
return 0; return 0;
} }
......
...@@ -3578,8 +3578,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) ...@@ -3578,8 +3578,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
WARN_ON(i915_vma_bind(vma, WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0, obj ? obj->cache_level : 0,
PIN_UPDATE)); PIN_UPDATE));
if (obj) if (obj) {
i915_gem_object_lock(obj);
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
i915_gem_object_unlock(obj);
}
lock: lock:
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
......
...@@ -222,7 +222,9 @@ int i915_gem_render_state_emit(struct i915_request *rq) ...@@ -222,7 +222,9 @@ int i915_gem_render_state_emit(struct i915_request *rq)
goto err_unpin; goto err_unpin;
} }
i915_vma_lock(so.vma);
err = i915_vma_move_to_active(so.vma, rq, 0); err = i915_vma_move_to_active(so.vma, rq, 0);
i915_vma_unlock(so.vma);
err_unpin: err_unpin:
i915_vma_unpin(so.vma); i915_vma_unpin(so.vma);
err_vma: err_vma:
......
...@@ -840,13 +840,14 @@ void i915_vma_destroy(struct i915_vma *vma) ...@@ -840,13 +840,14 @@ void i915_vma_destroy(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(i915_vma_is_pinned(vma)); GEM_BUG_ON(i915_vma_is_pinned(vma));
if (i915_vma_is_closed(vma)) if (i915_vma_is_closed(vma))
list_del(&vma->closed_link); list_del(&vma->closed_link);
WARN_ON(i915_vma_unbind(vma)); WARN_ON(i915_vma_unbind(vma));
GEM_BUG_ON(i915_vma_is_active(vma));
__i915_vma_destroy(vma); __i915_vma_destroy(vma);
} }
...@@ -908,12 +909,10 @@ static void export_fence(struct i915_vma *vma, ...@@ -908,12 +909,10 @@ static void export_fence(struct i915_vma *vma,
* handle an error right now. Worst case should be missed * handle an error right now. Worst case should be missed
* synchronisation leading to rendering corruption. * synchronisation leading to rendering corruption.
*/ */
reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE) if (flags & EXEC_OBJECT_WRITE)
reservation_object_add_excl_fence(resv, &rq->fence); reservation_object_add_excl_fence(resv, &rq->fence);
else if (reservation_object_reserve_shared(resv, 1) == 0) else if (reservation_object_reserve_shared(resv, 1) == 0)
reservation_object_add_shared_fence(resv, &rq->fence); reservation_object_add_shared_fence(resv, &rq->fence);
reservation_object_unlock(resv);
} }
int i915_vma_move_to_active(struct i915_vma *vma, int i915_vma_move_to_active(struct i915_vma *vma,
...@@ -922,7 +921,8 @@ int i915_vma_move_to_active(struct i915_vma *vma, ...@@ -922,7 +921,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
{ {
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
lockdep_assert_held(&rq->i915->drm.struct_mutex); assert_vma_held(vma);
assert_object_held(obj);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/* /*
......
...@@ -298,6 +298,18 @@ void i915_vma_close(struct i915_vma *vma); ...@@ -298,6 +298,18 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma); void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma); void i915_vma_destroy(struct i915_vma *vma);
#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
static inline void i915_vma_lock(struct i915_vma *vma)
{
reservation_object_lock(vma->resv, NULL);
}
static inline void i915_vma_unlock(struct i915_vma *vma)
{
reservation_object_unlock(vma->resv);
}
int __i915_vma_do_pin(struct i915_vma *vma, int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags); u64 size, u64 alignment, u64 flags);
static inline int __must_check static inline int __must_check
......
...@@ -2113,6 +2113,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, ...@@ -2113,6 +2113,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* pin/unpin/fence and not more. * pin/unpin/fence and not more.
*/ */
wakeref = intel_runtime_pm_get(dev_priv); wakeref = intel_runtime_pm_get(dev_priv);
i915_gem_object_lock(obj);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin); atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
...@@ -2167,6 +2168,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, ...@@ -2167,6 +2168,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
err: err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin); atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
i915_gem_object_unlock(obj);
intel_runtime_pm_put(dev_priv, wakeref); intel_runtime_pm_put(dev_priv, wakeref);
return vma; return vma;
} }
...@@ -2175,9 +2177,12 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) ...@@ -2175,9 +2177,12 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{ {
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
i915_gem_object_lock(vma->obj);
if (flags & PLANE_HAS_FENCE) if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma); i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma); i915_gem_object_unpin_from_display_plane(vma);
i915_gem_object_unlock(vma->obj);
i915_vma_put(vma); i915_vma_put(vma);
} }
......
...@@ -343,8 +343,6 @@ static void capture_logs_work(struct work_struct *work) ...@@ -343,8 +343,6 @@ static void capture_logs_work(struct work_struct *work)
static int guc_log_map(struct intel_guc_log *log) static int guc_log_map(struct intel_guc_log *log)
{ {
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr; void *vaddr;
int ret; int ret;
...@@ -353,9 +351,9 @@ static int guc_log_map(struct intel_guc_log *log) ...@@ -353,9 +351,9 @@ static int guc_log_map(struct intel_guc_log *log)
if (!log->vma) if (!log->vma)
return -ENODEV; return -ENODEV;
mutex_lock(&dev_priv->drm.struct_mutex); i915_gem_object_lock(log->vma->obj);
ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true); ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_object_unlock(log->vma->obj);
if (ret) if (ret)
return ret; return ret;
......
...@@ -765,8 +765,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ...@@ -765,8 +765,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin); atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
i915_gem_object_lock(new_bo);
vma = i915_gem_object_pin_to_display_plane(new_bo, vma = i915_gem_object_pin_to_display_plane(new_bo,
0, NULL, PIN_MAPPABLE); 0, NULL, PIN_MAPPABLE);
i915_gem_object_unlock(new_bo);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto out_pin_section; goto out_pin_section;
...@@ -1305,15 +1307,20 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, ...@@ -1305,15 +1307,20 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
static int get_registers(struct intel_overlay *overlay, bool use_phys) static int get_registers(struct intel_overlay *overlay, bool use_phys)
{ {
struct drm_i915_private *i915 = overlay->i915;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_vma *vma; struct i915_vma *vma;
int err; int err;
obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE); mutex_lock(&i915->drm.struct_mutex);
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (obj == NULL) if (obj == NULL)
obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE); obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) if (IS_ERR(obj)) {
return PTR_ERR(obj); err = PTR_ERR(obj);
goto err_unlock;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
...@@ -1334,10 +1341,13 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys) ...@@ -1334,10 +1341,13 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
} }
overlay->reg_bo = obj; overlay->reg_bo = obj;
mutex_unlock(&i915->drm.struct_mutex);
return 0; return 0;
err_put_bo: err_put_bo:
i915_gem_object_put(obj); i915_gem_object_put(obj);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err; return err;
} }
...@@ -1363,18 +1373,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) ...@@ -1363,18 +1373,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
INIT_ACTIVE_REQUEST(&overlay->last_flip); INIT_ACTIVE_REQUEST(&overlay->last_flip);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv)); ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret) if (ret)
goto out_free; goto out_free;
i915_gem_object_lock(overlay->reg_bo);
ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true); ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
i915_gem_object_unlock(overlay->reg_bo);
if (ret) if (ret)
goto out_reg_bo; goto out_reg_bo;
mutex_unlock(&dev_priv->drm.struct_mutex);
memset_io(overlay->regs, 0, sizeof(struct overlay_registers)); memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(overlay->regs); update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs); update_reg_attrs(overlay, overlay->regs);
...@@ -1386,7 +1394,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) ...@@ -1386,7 +1394,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
out_reg_bo: out_reg_bo:
i915_gem_object_put(overlay->reg_bo); i915_gem_object_put(overlay->reg_bo);
out_free: out_free:
mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay); kfree(overlay);
} }
......
...@@ -246,15 +246,13 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, ...@@ -246,15 +246,13 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type), intel_uc_fw_type_repr(uc_fw->type),
intel_uc_fw_status_repr(uc_fw->load_status)); intel_uc_fw_status_repr(uc_fw->load_status));
intel_uc_fw_ggtt_bind(uc_fw);
/* Call custom loader */ /* Call custom loader */
intel_uc_fw_ggtt_bind(uc_fw);
err = xfer(uc_fw); err = xfer(uc_fw);
intel_uc_fw_ggtt_unbind(uc_fw);
if (err) if (err)
goto fail; goto fail;
intel_uc_fw_ggtt_unbind(uc_fw);
uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS; uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw load %s\n", DRM_DEBUG_DRIVER("%s fw load %s\n",
intel_uc_fw_type_repr(uc_fw->type), intel_uc_fw_type_repr(uc_fw->type),
......
...@@ -874,7 +874,9 @@ static int live_all_engines(void *arg) ...@@ -874,7 +874,9 @@ static int live_all_engines(void *arg)
i915_gem_object_set_active_reference(batch->obj); i915_gem_object_set_active_reference(batch->obj);
} }
i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0); err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err); GEM_BUG_ON(err);
i915_request_get(request[id]); i915_request_get(request[id]);
...@@ -989,7 +991,9 @@ static int live_sequential_engines(void *arg) ...@@ -989,7 +991,9 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err); GEM_BUG_ON(err);
request[id]->batch = batch; request[id]->batch = batch;
i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0); err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err); GEM_BUG_ON(err);
i915_gem_object_set_active_reference(batch->obj); i915_gem_object_set_active_reference(batch->obj);
......
...@@ -886,7 +886,9 @@ static int igt_vma_remapped_gtt(void *arg) ...@@ -886,7 +886,9 @@ static int igt_vma_remapped_gtt(void *arg)
unsigned int x, y; unsigned int x, y;
int err; int err;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true); err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) if (err)
goto out; goto out;
......
...@@ -76,7 +76,9 @@ static int move_to_active(struct i915_vma *vma, ...@@ -76,7 +76,9 @@ static int move_to_active(struct i915_vma *vma,
{ {
int err; int err;
i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags); err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
if (err) if (err)
return err; return err;
......