Commit 0bc40be8 authored by Tvrtko Ursulin

drm/i915: Rename intel_engine_cs function parameters

@@
identifier func;
@@
func(..., struct intel_engine_cs *
- ring
+ engine
, ...)
{
<...
- ring
+ engine
...>
}
@@
identifier func;
type T;
@@
T func(..., struct intel_engine_cs *
- ring
+ engine
, ...);
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent e2f80391
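
The commit message above is a Coccinelle semantic patch expressing the rename mechanically: the first rule rewrites every function taking a struct intel_engine_cs * parameter named ring, renaming the parameter and all its uses in the body to engine; the second rule does the same for prototypes. As a minimal usage sketch (the file name engine-rename.cocci and the target directory are illustrative assumptions, not part of this commit), such a patch is normally applied with Coccinelle's spatch tool:

    spatch --sp-file engine-rename.cocci --in-place --dir drivers/gpu/drm/i915

--in-place rewrites the matched sources directly; dropping it makes spatch print the resulting diff to stdout instead, which is a safer first run.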
This diff is collapsed.
@@ -725,11 +725,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-				 struct intel_engine_cs *ring)
+				 struct intel_engine_cs *engine)
 {
-	if (ring->get_seqno) {
+	if (engine->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %x\n",
-			   ring->name, ring->get_seqno(ring, false));
+			   engine->name, engine->get_seqno(engine, false));
 	}
 }
@@ -1992,22 +1992,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct intel_context *ctx,
-			      struct intel_engine_cs *ring)
+			      struct intel_engine_cs *engine)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
 		seq_printf(m, "Context on %s with no gem object\n",
-			   ring->name);
+			   engine->name);
 		return;
 	}
 
-	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx, ring));
+	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+		   intel_execlists_ctx_id(ctx, engine));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
...
@@ -2964,10 +2964,10 @@ int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
@@ -3297,10 +3297,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
@@ -3571,11 +3571,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 	}
 }
 
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
 				      struct drm_i915_gem_request *req)
 {
-	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
-		i915_gem_request_assign(&ring->trace_irq_req, req);
+	if (engine->trace_irq_req == NULL && engine->irq_get(engine))
+		i915_gem_request_assign(&engine->trace_irq_req, req);
 }
 
 #endif
@@ -1141,9 +1141,9 @@ static void fake_irq(unsigned long data)
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-		       struct intel_engine_cs *ring)
+		       struct intel_engine_cs *engine)
 {
-	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
 static unsigned long local_clock_us(unsigned *cpu)
@@ -2689,11 +2689,11 @@ void i915_gem_request_free(struct kref *req_ref)
 }
 
 static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
 			 struct intel_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 	struct drm_i915_gem_request *req;
 	int ret;
@@ -2706,13 +2706,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
 	if (req == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
 	if (ret)
 		goto err;
 
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
-	req->ring = ring;
+	req->ring = engine;
 	req->ctx = ctx;
 	i915_gem_context_reference(req->ctx);
@@ -2787,11 +2787,11 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 }
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, list) {
 		if (i915_gem_request_completed(request, false))
 			continue;
@@ -2802,37 +2802,37 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
 }
 
 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-				       struct intel_engine_cs *ring)
+				       struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	bool ring_hung;
 
-	request = i915_gem_find_active_request(ring);
+	request = i915_gem_find_active_request(engine);
 	if (request == NULL)
 		return;
 
-	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
-	list_for_each_entry_continue(request, &ring->request_list, list)
+	list_for_each_entry_continue(request, &engine->request_list, list)
 		i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring)
+					struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *buffer;
 
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				       struct drm_i915_gem_object,
-				       ring_list[ring->id]);
+				       ring_list[engine->id]);
 
-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}
 
 	/*
@@ -2842,14 +2842,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 */
 
 	if (i915.enable_execlists) {
-		spin_lock_irq(&ring->execlist_lock);
+		spin_lock_irq(&engine->execlist_lock);
 
 		/* list_splice_tail_init checks for empty lists */
-		list_splice_tail_init(&ring->execlist_queue,
-				      &ring->execlist_retired_req_list);
-		spin_unlock_irq(&ring->execlist_lock);
+		list_splice_tail_init(&engine->execlist_queue,
+				      &engine->execlist_retired_req_list);
+		spin_unlock_irq(&engine->execlist_lock);
 
-		intel_execlists_retire_requests(ring);
+		intel_execlists_retire_requests(engine);
 	}
 
 	/*
@@ -2859,10 +2859,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * implicit references on things like e.g. ppgtt address spaces through
 	 * the request.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
@@ -2876,7 +2876,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * upon reset is less than when we start. Do one more pass over
 	 * all the ringbuffers to reset last_retired_head.
 	 */
-	list_for_each_entry(buffer, &ring->buffers, link) {
+	list_for_each_entry(buffer, &engine->buffers, link) {
 		buffer->last_retired_head = buffer->tail;
 		intel_ring_update_space(buffer);
 	}
@@ -2910,19 +2910,19 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 
 	/* Retire requests first as we use it above for the early return.
 	 * If we retire requests last, we may use a later seqno and so clear
 	 * the requests lists without clearing the active list, leading to
 	 * confusion.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
@@ -2936,26 +2936,26 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
 	 */
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				       struct drm_i915_gem_object,
-				       ring_list[ring->id]);
+				       ring_list[engine->id]);
 
-		if (!list_empty(&obj->last_read_req[ring->id]->list))
+		if (!list_empty(&obj->last_read_req[engine->id]->list))
 			break;
 
-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}
 
-	if (unlikely(ring->trace_irq_req &&
-		     i915_gem_request_completed(ring->trace_irq_req, true))) {
-		ring->irq_put(ring);
-		i915_gem_request_assign(&ring->trace_irq_req, NULL);
+	if (unlikely(engine->trace_irq_req &&
+		     i915_gem_request_completed(engine->trace_irq_req, true))) {
+		engine->irq_put(engine);
+		i915_gem_request_assign(&engine->trace_irq_req, NULL);
 	}
 
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 }
 
 bool
@@ -5022,10 +5022,10 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 }
 
 static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_ring_lists(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
 }
 
 void
...
@@ -600,7 +600,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	return ret;
 }
 
-static inline bool should_skip_switch(struct intel_engine_cs *ring,
+static inline bool should_skip_switch(struct intel_engine_cs *engine,
 				      struct intel_context *from,
 				      struct intel_context *to)
 {
@@ -608,42 +608,42 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
 		return false;
 
 	if (to->ppgtt && from == to &&
-	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+	    !(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings))
 		return true;
 
 	return false;
 }
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	if (!to->ppgtt)
 		return false;
 
-	if (INTEL_INFO(ring->dev)->gen < 8)
+	if (INTEL_INFO(engine->dev)->gen < 8)
 		return true;
 
-	if (ring != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->ring[RCS])
 		return true;
 
 	return false;
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
 		   u32 hw_flags)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	if (!to->ppgtt)
 		return false;
 
-	if (!IS_GEN8(ring->dev))
+	if (!IS_GEN8(engine->dev))
 		return false;
 
-	if (ring != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->ring[RCS])
 		return false;
 
 	if (hw_flags & MI_RESTORE_INHIBIT)
...
@@ -599,7 +599,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_engine_cs *ring,
+				struct intel_engine_cs *engine,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -713,7 +713,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 }
 
 static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 			    struct list_head *vmas,
 			    struct intel_context *ctx,
 			    bool *need_relocs)
@@ -723,10 +723,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests_ring(engine);
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -788,7 +788,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (eb_vma_misplaced(vma))
 				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma,
+								      engine,
+								      need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -798,7 +800,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		if (drm_mm_node_allocated(&vma->node))
 			continue;
 
-		ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+		ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+						      need_relocs);
 		if (ret)
 			goto err;
 	}
@@ -821,7 +824,7 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
-				  struct intel_engine_cs *ring,
+				  struct intel_engine_cs *engine,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec,
 				  struct intel_context *ctx)
@@ -910,7 +913,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;
@@ -1062,12 +1066,12 @@ validate_exec_list(struct drm_device *dev,
 
 static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_engine_cs *ring, const u32 ctx_id)
+			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);
 
 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1080,8 +1084,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}
 
-	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_alloc(ctx, ring);
+	if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+		int ret = intel_lr_context_deferred_alloc(ctx, engine);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);
@@ -1171,7 +1175,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 }
 
 static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
 			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
@@ -1183,12 +1187,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	struct i915_vma *vma;
 	int ret;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return shadow_batch_obj;
 
-	ret = i915_parse_cmds(ring,
+	ret = i915_parse_cmds(engine,
 			      batch_obj,
 			      shadow_batch_obj,
 			      batch_start_offset,
...
@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
 	drm_gem_object_unreference(&so->obj->base);
 }
 
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so)
 {
 	int ret;
 
-	if (WARN_ON(ring->id != RCS))
+	if (WARN_ON(engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, ring->dev);
+	ret = render_state_init(so, engine->dev);
 	if (ret)
 		return ret;
...
@@ -43,7 +43,7 @@ struct render_state {
 
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
@@ -842,7 +842,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
 static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 					struct drm_i915_error_state *error,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
 	struct intel_engine_cs *to;
@@ -861,63 +861,64 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 		u16 signal_offset;
 		u32 *tmp;
 
-		if (ring == to)
+		if (engine == to)
 			continue;
 
-		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+		signal_offset = (GEN8_SIGNAL_OFFSET(engine, i) & (PAGE_SIZE - 1))
 				/ 4;
 		tmp = error->semaphore_obj->pages[0];
-		idx = intel_ring_sync_index(ring, to);
+		idx = intel_ring_sync_index(engine, to);
 
 		ering->semaphore_mboxes[idx] = tmp[signal_offset];
-		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
 	}
 }
 
 static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
-	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
-	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
-	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
-	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
 	if (HAS_VEBOX(dev_priv->dev)) {
 		ering->semaphore_mboxes[2] =
-			I915_READ(RING_SYNC_2(ring->mmio_base));
-		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+			I915_READ(RING_SYNC_2(engine->mmio_base));
+		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
 	}
 }
 
 static void i915_record_ring_state(struct drm_device *dev,
 				   struct drm_i915_error_state *error,
-				   struct intel_engine_cs *ring,
+				   struct intel_engine_cs *engine,
 				   struct drm_i915_error_ring *ering)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
-		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (INTEL_INFO(dev)->gen >= 8)
-			gen8_record_semaphore_state(dev_priv, error, ring, ering);
+			gen8_record_semaphore_state(dev_priv, error, engine,
+						    ering);
 		else
-			gen6_record_semaphore_state(dev_priv, ring, ering);
+			gen6_record_semaphore_state(dev_priv, engine, ering);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
-		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
-		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
-		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
-		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
 		if (INTEL_INFO(dev)->gen >= 8) {
-			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
-			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
 		}
-		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
 	} else {
 		ering->faddr = I915_READ(DMA_FADD_I8XX);
 		ering->ipeir = I915_READ(IPEIR);
@@ -925,20 +926,20 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->instdone = I915_READ(GEN2_INSTDONE);
 	}
 
-	ering->waiting = waitqueue_active(&ring->irq_queue);
-	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
-	ering->seqno = ring->get_seqno(ring, false);
-	ering->acthd = intel_ring_get_active_head(ring);
-	ering->start = I915_READ_START(ring);
-	ering->head = I915_READ_HEAD(ring);
-	ering->tail = I915_READ_TAIL(ring);
-	ering->ctl = I915_READ_CTL(ring);
+	ering->waiting = waitqueue_active(&engine->irq_queue);
+	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+	ering->seqno = engine->get_seqno(engine, false);
+	ering->acthd = intel_ring_get_active_head(engine);
+	ering->start = I915_READ_START(engine);
+	ering->head = I915_READ_HEAD(engine);
+	ering->tail = I915_READ_TAIL(engine);
+	ering->ctl = I915_READ_CTL(engine);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		i915_reg_t mmio;
 
 		if (IS_GEN7(dev)) {
-			switch (ring->id) {
+			switch (engine->id) {
 			default:
 			case RCS:
 				mmio = RENDER_HWS_PGA_GEN7;
@@ -953,51 +954,51 @@ static void i915_record_ring_state(struct drm_device *dev,
 				mmio = VEBOX_HWS_PGA_GEN7;
 				break;
 			}
-		} else if (IS_GEN6(ring->dev)) {
-			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+		} else if (IS_GEN6(engine->dev)) {
+			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 		} else {
 			/* XXX: gen8 returns to sanity */
-			mmio = RING_HWS_PGA(ring->mmio_base);
+			mmio = RING_HWS_PGA(engine->mmio_base);
 		}
 
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->hangcheck_score = ring->hangcheck.score;
-	ering->hangcheck_action = ring->hangcheck.action;
+	ering->hangcheck_score = engine->hangcheck.score;
+	ering->hangcheck_action = engine->hangcheck.action;
 
 	if (USES_PPGTT(dev)) {
 		int i;
 
-		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
 		if (IS_GEN6(dev))
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE_READ(ring));
+				I915_READ(RING_PP_DIR_BASE_READ(engine));
 		else if (IS_GEN7(dev))
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE(ring));
+				I915_READ(RING_PP_DIR_BASE(engine));
 		else if (INTEL_INFO(dev)->gen >= 8)
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
-					I915_READ(GEN8_RING_PDP_UDW(ring, i));
+					I915_READ(GEN8_RING_PDP_UDW(engine, i));
 				ering->vm_info.pdp[i] <<= 32;
 				ering->vm_info.pdp[i] |=
-					I915_READ(GEN8_RING_PDP_LDW(ring, i));
+					I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			}
 	}
 }
 
-static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 					   struct drm_i915_error_state *error,
 					   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* Currently render ring is the only HW context user */
-	if (ring->id != RCS || !error->ccid)
+	if (engine->id != RCS || !error->ccid)
 		return;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
...
@@ -994,14 +994,14 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 	return;
 }
 
-static void notify_ring(struct intel_engine_cs *ring)
+static void notify_ring(struct intel_engine_cs *engine)
 {
-	if (!intel_ring_initialized(ring))
+	if (!intel_ring_initialized(engine))
 		return;
 
-	trace_i915_gem_request_notify(ring);
+	trace_i915_gem_request_notify(engine);
 
-	wake_up_all(&ring->irq_queue);
+	wake_up_all(&engine->irq_queue);
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1319,12 +1319,12 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 }
 
 static __always_inline void
-gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
-		notify_ring(ring);
+		notify_ring(engine);
 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-		intel_lrc_irq_handler(ring);
+		intel_lrc_irq_handler(engine);
 }
 
 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
@@ -2805,10 +2805,10 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 }
 
 static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *engine, u32 seqno)
 {
-	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(seqno, ring->last_submitted_seqno));
+	return (list_empty(&engine->request_list) ||
+		i915_seqno_passed(seqno, engine->last_submitted_seqno));
 }
 
 static bool
@@ -2824,42 +2824,43 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
 }
 
 static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	int i;
 
 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
 		for_each_ring(signaller, dev_priv, i) {
-			if (ring == signaller)
+			if (engine == signaller)
 				continue;
 
-			if (offset == signaller->semaphore.signal_ggtt[ring->id])
+			if (offset == signaller->semaphore.signal_ggtt[engine->id])
 				return signaller;
 		}
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
 		for_each_ring(signaller, dev_priv, i) {
-			if(ring == signaller)
+			if(engine == signaller)
 				continue;
 
-			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
 				return signaller;
 		}
 	}
 
 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-		  ring->id, ipehr, offset);
+		  engine->id, ipehr, offset);
 
 	return NULL;
 }
 
 static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2881,11 +2882,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	 * Therefore, this function does not support execlist mode in its
 	 * current form. Just return NULL and move on.
 	 */
-	if (ring->buffer == NULL)
+	if (engine->buffer == NULL)
 		return NULL;
 
-	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+	if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
 		return NULL;
 
 	/*
@@ -2896,8 +2897,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	 * point at at batch, and semaphores are always emitted into the
 	 * ringbuffer itself.
 	 */
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
+	head = I915_READ_HEAD(engine) & HEAD_ADDR;
+	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -2905,10 +2906,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 		 * our ring is smaller than what the hardware (and hence
 		 * HEAD_ADDR) allows. Also handles wrap-around.
 		 */
-		head &= ring->buffer->size - 1;
+		head &= engine->buffer->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(ring->buffer->virtual_start + head);
+		cmd = ioread32(engine->buffer->virtual_start + head);
 		if (cmd == ipehr)
 			break;
@@ -2918,24 +2919,24 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		offset = ioread32(ring->buffer->virtual_start + head + 12);
+	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+	if (INTEL_INFO(engine->dev)->gen >= 8) {
+		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
-		offset = ioread32(ring->buffer->virtual_start + head + 8);
+		offset = ioread32(engine->buffer->virtual_start + head + 8);
 	}
 
-	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
+	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
 }
 
-static int semaphore_passed(struct intel_engine_cs *ring)
+static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	u32 seqno;
 
-	ring->hangcheck.deadlock++;
-	signaller = semaphore_waits_for(ring, &seqno);
+	engine->hangcheck.deadlock++;
+	signaller = semaphore_waits_for(engine, &seqno);
 	if (signaller == NULL)
 		return -1;
@@ -2963,16 +2964,16 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 		engine->hangcheck.deadlock = 0;
 }
 
-static bool subunits_stuck(struct intel_engine_cs *ring)
+static bool subunits_stuck(struct intel_engine_cs *engine)
 {
 	u32 instdone[I915_NUM_INSTDONE_REG];
 	bool stuck;
 	int i;
 
-	if (ring->id != RCS)
+	if (engine->id != RCS)
 		return true;
 
-	i915_get_extra_instdone(ring->dev, instdone);
+	i915_get_extra_instdone(engine->dev, instdone);
 
 	/* There might be unstable subunit states even when
 	 * actual head is not moving. Filter out the unstable ones by
@@ -2981,44 +2982,44 @@ static bool subunits_stuck(struct intel_engine_cs *ring)
 	 */
 	stuck = true;
 	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
-		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
 
-		if (tmp != ring->hangcheck.instdone[i])
+		if (tmp != engine->hangcheck.instdone[i])
 			stuck = false;
 
-		ring->hangcheck.instdone[i] |= tmp;
+		engine->hangcheck.instdone[i] |= tmp;
 	}
 
 	return stuck;
 }
 
 static enum intel_ring_hangcheck_action
-head_stuck(struct intel_engine_cs *ring, u64 acthd)
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	if (acthd != ring->hangcheck.acthd) {
+	if (acthd != engine->hangcheck.acthd) {
 
 		/* Clear subunit states on head movement */
-		memset(ring->hangcheck.instdone, 0,
-		       sizeof(ring->hangcheck.instdone));
+		memset(engine->hangcheck.instdone, 0,
+		       sizeof(engine->hangcheck.instdone));
 
 		return HANGCHECK_ACTIVE;
 	}
 
-	if (!subunits_stuck(ring))
+	if (!subunits_stuck(engine))
 		return HANGCHECK_ACTIVE;
 
 	return HANGCHECK_HUNG;
 }
 
 static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum intel_ring_hangcheck_action ha;
 	u32 tmp;
 
-	ha = head_stuck(ring, acthd);
+	ha = head_stuck(engine, acthd);
 	if (ha != HANGCHECK_HUNG)
 		return ha;
@@ -3030,24 +3031,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 	 * and break the hang. This should work on
 	 * all but the second generation chipsets.
 	 */
-	tmp = I915_READ_CTL(ring);
+	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
 		i915_handle_error(dev, false,
 				  "Kicking stuck wait on %s",
-				  ring->name);
-		I915_WRITE_CTL(ring, tmp);
+				  engine->name);
+		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
-		switch (semaphore_passed(ring)) {
+		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
 			i915_handle_error(dev, false,
 					  "Kicking stuck semaphore on %s",
-					  ring->name);
-			I915_WRITE_CTL(ring, tmp);
+					  engine->name);
+			I915_WRITE_CTL(engine, tmp);
 			return HANGCHECK_KICK;
 		case 0:
 			return HANGCHECK_WAIT;
...
@@ -11214,7 +11214,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	return 0;
 }
 
-static bool use_mmio_flip(struct intel_engine_cs *ring,
+static bool use_mmio_flip(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_object *obj)
 {
 	/*
@@ -11225,10 +11225,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 	 * So using MMIO flips there would disrupt this mechanism.
 	 */
 
-	if (ring == NULL)
+	if (engine == NULL)
 		return true;
 
-	if (INTEL_INFO(ring->dev)->gen < 5)
+	if (INTEL_INFO(engine->dev)->gen < 5)
 		return false;
 
 	if (i915.use_mmio_flip < 0)
@@ -11242,7 +11242,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 					    false))
 		return true;
 	else
-		return ring != i915_gem_request_get_ring(obj->last_write_req);
+		return engine != i915_gem_request_get_ring(obj->last_write_req);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
...
This diff is collapsed.
@@ -57,8 +57,8 @@
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
-void intel_logical_ring_stop(struct intel_engine_cs *ring);
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+void intel_logical_ring_stop(struct intel_engine_cs *engine);
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 int intel_logical_rings_init(struct drm_device *dev);
 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
@@ -98,18 +98,18 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
 #define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
 
 void intel_lr_context_free(struct intel_context *ctx);
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-				    struct intel_engine_cs *ring);
+				    struct intel_engine_cs *engine);
 void intel_lr_context_unpin(struct intel_context *ctx,
 			    struct intel_engine_cs *engine);
 void intel_lr_context_reset(struct drm_device *dev,
 			    struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-				     struct intel_engine_cs *ring);
+				     struct intel_engine_cs *engine);
 
 u32 intel_execlists_ctx_id(struct intel_context *ctx,
-			   struct intel_engine_cs *ring);
+			   struct intel_engine_cs *engine);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -118,7 +118,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas);
 
-void intel_lrc_irq_handler(struct intel_engine_cs *ring);
-void intel_execlists_retire_requests(struct intel_engine_cs *ring);
+void intel_lrc_irq_handler(struct intel_engine_cs *engine);
+void intel_execlists_retire_requests(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
This diff is collapsed.
@@ -355,19 +355,19 @@ struct intel_engine_cs {
 };
 
 static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_ring_initialized(struct intel_engine_cs *engine)
 {
-	return ring->dev != NULL;
+	return engine->dev != NULL;
 }
 
 static inline unsigned
-intel_ring_flag(struct intel_engine_cs *ring)
+intel_ring_flag(struct intel_engine_cs *engine)
 {
-	return 1 << ring->id;
+	return 1 << engine->id;
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *ring,
+intel_ring_sync_index(struct intel_engine_cs *engine,
 		      struct intel_engine_cs *other)
 {
 	int idx;
@@ -380,7 +380,7 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
 	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
 	 */
 
-	idx = (other - ring) - 1;
+	idx = (other - engine) - 1;
 	if (idx < 0)
 		idx += I915_NUM_RINGS;
@@ -388,26 +388,26 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
 }
 
 static inline void
-intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+intel_flush_status_page(struct intel_engine_cs *engine, int reg)
 {
-	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
+	drm_clflush_virt_range(&engine->status_page.page_addr[reg],
 			       sizeof(uint32_t));
 }
 
 static inline u32
-intel_read_status_page(struct intel_engine_cs *ring,
+intel_read_status_page(struct intel_engine_cs *engine,
 		       int reg)
 {
 	/* Ensure that the compiler doesn't optimize away the load. */
 	barrier();
-	return ring->status_page.page_addr[reg];
+	return engine->status_page.page_addr[reg];
 }
 
 static inline void
-intel_write_status_page(struct intel_engine_cs *ring,
+intel_write_status_page(struct intel_engine_cs *engine,
 			int reg, u32 value)
 {
-	ring->status_page.page_addr[reg] = value;
+	engine->status_page.page_addr[reg] = value;
 }
 
 /*
@@ -438,42 +438,42 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
-void intel_stop_ring_buffer(struct intel_engine_cs *ring);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+void intel_stop_ring_buffer(struct intel_engine_cs *engine);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
+static inline void intel_ring_emit(struct intel_engine_cs *engine,
 				   u32 data)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
 				       i915_reg_t reg)
 {
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
 }
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
 }
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *ring);
+bool intel_ring_stopped(struct intel_engine_cs *engine);
 
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int __must_check intel_ring_idle(struct intel_engine_cs *engine);
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
-void intel_fini_pipe_control(struct intel_engine_cs *ring);
-int intel_init_pipe_control(struct intel_engine_cs *ring);
+void intel_fini_pipe_control(struct intel_engine_cs *engine);
+int intel_init_pipe_control(struct intel_engine_cs *engine);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -481,9 +481,9 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
-int init_workarounds_ring(struct intel_engine_cs *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+int init_workarounds_ring(struct intel_engine_cs *engine);
 
 static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {
...