Commit d4c3022a authored by Eric Anholt

drm/v3d: Switch the type of job->bo[] to reduce casting.

All consumers wanted drm_gem_object * now.
Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20190416225856.20264-2-eric@anholt.net
Acked-by: Rob Clark <robdclark@gmail.com>
parent ad408c76
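
The casting the title refers to comes from the BO type layering visible in the diff: struct v3d_bo embeds struct drm_gem_shmem_object, which embeds struct drm_gem_object, so jobs that stored struct v3d_bo * had to reach through ->base.base (or cast whole arrays) whenever they handed the objects to the common GEM helpers. Below is a minimal, self-contained C sketch of that layering; the struct bodies are hypothetical stand-ins, only the nesting mirrors the driver.

/*
 * Standalone sketch of the BO layering behind this patch. The three
 * struct bodies below are simplified stand-ins, not the real kernel
 * definitions; only the embedding relationship mirrors the driver.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_gem_object {		/* the type the common helpers want */
	int resv;		/* stand-in for the reservation object */
};

struct drm_gem_shmem_object {	/* shmem helper layer */
	struct drm_gem_object base;
};

struct v3d_bo {			/* driver-specific wrapper */
	struct drm_gem_shmem_object base;
};

static struct v3d_bo *to_v3d_bo(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem =
		container_of(obj, struct drm_gem_shmem_object, base);

	return container_of(shmem, struct v3d_bo, base);
}

int main(void)
{
	struct v3d_bo bo = { .base.base.resv = 42 };

	/* Old scheme: the job arrays held struct v3d_bo *, so every
	 * consumer either reached through ->base.base or cast the whole
	 * array to struct drm_gem_object **.
	 */
	struct v3d_bo *old_entry = &bo;
	printf("old: resv = %d\n", old_entry->base.base.resv);

	/* New scheme: the job arrays hold struct drm_gem_object *
	 * directly, so the GEM helpers take them without casts, and the
	 * driver-specific paths convert back with to_v3d_bo().
	 */
	struct drm_gem_object *new_entry = &bo.base.base;
	printf("new: resv = %d\n", new_entry->resv);
	printf("driver view again: %p\n", (void *)to_v3d_bo(new_entry));

	return 0;
}

With the arrays retyped, the helper calls in the diff (drm_gem_lock_reservations(), drm_gem_unlock_reservations(), drm_gem_object_put_unlocked()) operate on the stored pointers as-is.
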
@@ -189,7 +189,7 @@ struct v3d_exec_info {
 	struct kref refcount;
 
 	/* This is the array of BOs that were looked up at the start of exec. */
-	struct v3d_bo **bo;
+	struct drm_gem_object **bo;
 	u32 bo_count;
 
 	/* List of overflow BOs used in the job that need to be
@@ -217,7 +217,7 @@ struct v3d_tfu_job {
 	struct kref refcount;
 
 	/* This is the array of BOs that were looked up at the start of exec. */
-	struct v3d_bo *bo[4];
+	struct drm_gem_object *bo[4];
 };
 
 /**

@@ -194,27 +194,17 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
 }
 
 static void
-v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
+v3d_attach_object_fences(struct drm_gem_object **bos, int bo_count,
 			 struct dma_fence *fence)
 {
 	int i;
 
 	for (i = 0; i < bo_count; i++) {
 		/* XXX: Use shared fences for read-only objects. */
-		reservation_object_add_excl_fence(bos[i]->base.base.resv,
-						  fence);
+		reservation_object_add_excl_fence(bos[i]->resv, fence);
 	}
 }
 
-static void
-v3d_unlock_bo_reservations(struct v3d_bo **bos,
-			   int bo_count,
-			   struct ww_acquire_ctx *acquire_ctx)
-{
-	drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
-				    acquire_ctx);
-}
-
 /* Takes the reservation lock on all the BOs being referenced, so that
  * at queue submit time we can update the reservations.
  *
@@ -223,14 +213,13 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
  * to v3d, so we don't attach dma-buf fences to them.
  */
 static int
-v3d_lock_bo_reservations(struct v3d_bo **bos,
+v3d_lock_bo_reservations(struct drm_gem_object **bos,
 			 int bo_count,
 			 struct ww_acquire_ctx *acquire_ctx)
 {
 	int i, ret;
 
-	ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
-					bo_count, acquire_ctx);
+	ret = drm_gem_lock_reservations(bos, bo_count, acquire_ctx);
 	if (ret)
 		return ret;
 
@@ -238,11 +227,10 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
 	 * before we commit the CL to the hardware.
 	 */
 	for (i = 0; i < bo_count; i++) {
-		ret = reservation_object_reserve_shared(bos[i]->base.base.resv,
-							1);
+		ret = reservation_object_reserve_shared(bos[i]->resv, 1);
 		if (ret) {
-			v3d_unlock_bo_reservations(bos, bo_count,
-						   acquire_ctx);
+			drm_gem_unlock_reservations(bos, bo_count,
+						    acquire_ctx);
 			return ret;
 		}
 	}
@@ -319,7 +307,7 @@ v3d_cl_lookup_bos(struct drm_device *dev,
 			goto fail;
 		}
 		drm_gem_object_get(bo);
-		exec->bo[i] = to_v3d_bo(bo);
+		exec->bo[i] = bo;
 	}
 	spin_unlock(&file_priv->table_lock);
 
@@ -347,7 +335,7 @@ v3d_exec_cleanup(struct kref *ref)
 	dma_fence_put(exec->render_done_fence);
 
 	for (i = 0; i < exec->bo_count; i++)
-		drm_gem_object_put_unlocked(&exec->bo[i]->base.base);
+		drm_gem_object_put_unlocked(exec->bo[i]);
 	kvfree(exec->bo);
 
 	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
@@ -378,7 +366,7 @@ v3d_tfu_job_cleanup(struct kref *ref)
 
 	for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
 		if (job->bo[i])
-			drm_gem_object_put_unlocked(&job->bo[i]->base.base);
+			drm_gem_object_put_unlocked(job->bo[i]);
 	}
 
 	pm_runtime_mark_last_busy(v3d->dev);
@@ -532,7 +520,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	v3d_attach_object_fences(exec->bo, exec->bo_count,
 				 exec->render_done_fence);
 
-	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
+	drm_gem_unlock_reservations(exec->bo, exec->bo_count, &acquire_ctx);
 
 	/* Update the return sync object for the */
 	sync_out = drm_syncobj_find(file_priv, args->out_sync);
@@ -547,7 +535,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
 fail_unreserve:
 	mutex_unlock(&v3d->sched_lock);
-	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
+	drm_gem_unlock_reservations(exec->bo, exec->bo_count, &acquire_ctx);
 fail:
 	v3d_exec_put(exec);
 
@@ -616,7 +604,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
 			goto fail;
 		}
 		drm_gem_object_get(bo);
-		job->bo[bo_count] = to_v3d_bo(bo);
+		job->bo[bo_count] = bo;
 	}
 	spin_unlock(&file_priv->table_lock);
 
@@ -639,7 +627,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
 
 	v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);
 
-	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+	drm_gem_unlock_reservations(job->bo, bo_count, &acquire_ctx);
 
 	/* Update the return sync object */
 	sync_out = drm_syncobj_find(file_priv, args->out_sync);
@@ -655,7 +643,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
 
 fail_unreserve:
 	mutex_unlock(&v3d->sched_lock);
-	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+	drm_gem_unlock_reservations(job->bo, bo_count, &acquire_ctx);
 fail:
 	v3d_tfu_job_put(job);