Commit b2af0376 authored by John Harrison, committed by Daniel Vetter

drm/i915: Update [vma|object]_move_to_active() to take request structures

Now that everything above has been converted to use request structures, it
is possible to update the lower-level move_to_active() functions to be
request-based as well.
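
The pattern is the same at every call site: the engine is no longer passed
explicitly, the request is handed down instead, and the engine is derived
from it where it is still needed. A minimal before/after sketch of the call
shape (illustrative only; see the diff below for the real code):

	/* Before: the caller had to know which engine the work was for. */
	i915_vma_move_to_active(vma, ring);

	/* After: the caller passes the request; i915_vma_move_to_active()
	 * derives the engine itself via i915_gem_request_get_ring(req).
	 */
	i915_vma_move_to_active(vma, req);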

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 75289874
drivers/gpu/drm/i915/i915_drv.h
@@ -2808,7 +2808,7 @@ int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_engine_cs *to,
 			 struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
drivers/gpu/drm/i915/i915_gem.c
@@ -2340,9 +2340,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring)
+			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *ring;
+
+	ring = i915_gem_request_get_ring(req);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
@@ -2350,8 +2353,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	obj->active |= intel_ring_flag(ring);
 
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id],
-				intel_ring_get_request(ring));
+	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
drivers/gpu/drm/i915/i915_gem_context.c
@@ -736,7 +736,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1036,7 +1036,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, ring);
+		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);
drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -171,7 +171,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 out:
 	i915_gem_render_state_fini(&so);
drivers/gpu/drm/i915/intel_lrc.c
@@ -1593,7 +1593,7 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 	if (ret)
		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 out:
 	i915_gem_render_state_fini(&so);