Commit 5cf3d280 authored by Chris Wilson

drm/i915: i915_vma_move_to_active prep patch

This patch is broken out of the next one just to remove the code motion
from that patch and make it more readable. What we do here is move
i915_vma_move_to_active() to i915_gem_execbuffer.c and put the three
stages (read, write, fenced) together, so that future modifications to
active handling are all located in the same spot. The importance of this
is that we can more simply control the order in which requests are
placed in the retirement list (i.e. control the order in which we
retire, and so control the lifetimes to avoid having to hold onto
references).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-24-git-send-email-chris@chris-wilson.co.uk
parent 4b8de8e6
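For orientation before the diff: the consolidated function gathers the three
stages behind a single flags word reusing the execbuffer EXEC_OBJECT_* bits.
Below is a condensed sketch of the code this patch adds (assertions, the
framebuffer invalidation, the implicit-flush domain update and the fence LRU
bookkeeping of the full version are elided here; see the execbuffer hunk for
the complete body):

	void i915_vma_move_to_active(struct i915_vma *vma,
				     struct drm_i915_gem_request *req,
				     unsigned int flags)
	{
		struct drm_i915_gem_object *obj = vma->obj;
		const unsigned int idx = req->engine->id;

		obj->dirty = 1; /* be paranoid */

		/* Read stage: every caller marks the object active on this
		 * engine, taking a reference the first time the object
		 * enters the active list.
		 */
		if (obj->active == 0)
			i915_gem_object_get(obj);
		obj->active |= 1 << idx;
		i915_gem_active_set(&obj->last_read[idx], req);

		/* Write stage: only when the GPU will write to the object. */
		if (flags & EXEC_OBJECT_WRITE)
			i915_gem_active_set(&obj->last_write, req);

		/* Fenced stage: only for tiled access via a fence register. */
		if (flags & EXEC_OBJECT_NEEDS_FENCE)
			i915_gem_active_set(&obj->last_fence, req);

		list_move_tail(&vma->vm_link, &vma->vm->active_list);
	}

Callers that only read the object pass flags == 0, as the context-switch and
render-state hunks below do; execbuffer forwards vma->exec_entry->flags,
having first translated a pending write domain into EXEC_OBJECT_WRITE.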
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3153,7 +3153,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req);
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2330,24 +2330,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 	return obj->mapping;
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *engine;
-
-	engine = i915_gem_request_get_engine(req);
-
-	/* Add a reference if we're newly entering the active list. */
-	if (obj->active == 0)
-		i915_gem_object_get(obj);
-	obj->active |= intel_engine_flag(engine);
-
-	i915_gem_active_set(&obj->last_read[engine->id], req);
-
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
-}
-
 static void
 i915_gem_object_retire__write(struct i915_gem_active *active,
 			      struct drm_i915_gem_request *request)
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -816,8 +816,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
+		struct drm_i915_gem_object *obj = from->engine[RCS].state;
+
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -825,10 +825,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->engine[RCS].state->dirty = 1;
+		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
+		i915_gem_object_ggtt_unpin(obj);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1143,27 +1143,24 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
-static void
-i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-				   struct drm_i915_gem_request *req)
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-		struct drm_i915_gem_object *obj = vma->obj;
-		u32 old_read = obj->base.read_domains;
-		u32 old_write = obj->base.write_domain;
-
-		obj->dirty = 1; /* be paranoid */
-		obj->base.write_domain = obj->base.pending_write_domain;
-		if (obj->base.write_domain == 0)
-			obj->base.pending_read_domains |= obj->base.read_domains;
-		obj->base.read_domains = obj->base.pending_read_domains;
-
-		i915_vma_move_to_active(vma, req);
-		if (obj->base.write_domain) {
-			i915_gem_active_set(&obj->last_write, req);
-
-			intel_fb_obj_invalidate(obj, ORIGIN_CS);
+	struct drm_i915_gem_object *obj = vma->obj;
+	const unsigned int idx = req->engine->id;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+	obj->dirty = 1; /* be paranoid */
+
+	/* Add a reference if we're newly entering the active list. */
+	if (obj->active == 0)
+		i915_gem_object_get(obj);
+	obj->active |= 1 << idx;
+	i915_gem_active_set(&obj->last_read[idx], req);
+
+	if (flags & EXEC_OBJECT_WRITE) {
+		i915_gem_active_set(&obj->last_write, req);
+
+		intel_fb_obj_invalidate(obj, ORIGIN_CS);
@@ -1171,15 +1168,39 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-			/* update for the implicit flush after a batch */
-			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-		}
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			i915_gem_active_set(&obj->last_fence, req);
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = engine->i915;
-				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
-					       &dev_priv->mm.fence_list);
-			}
-		}
+		/* update for the implicit flush after a batch */
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+		i915_gem_active_set(&obj->last_fence, req);
+		if (flags & __EXEC_OBJECT_HAS_FENCE) {
+			struct drm_i915_private *dev_priv = req->i915;
+			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+				       &dev_priv->mm.fence_list);
+		}
+	}
+
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+				   struct drm_i915_gem_request *req)
+{
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
+
+		obj->base.write_domain = obj->base.pending_write_domain;
+		if (obj->base.write_domain)
+			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+		else
+			obj->base.pending_read_domains |= obj->base.read_domains;
+		obj->base.read_domains = obj->base.pending_read_domains;
+
+		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -217,7 +217,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 		goto err_unpin;
 	}
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
 
 err_unpin:
 	i915_gem_object_ggtt_unpin(so.obj);
 err_obj: