Commit e2f80391 authored by Tvrtko Ursulin

drm/i915: Rename local struct intel_engine_cs variables

Done by the Coccinelle script below, plus a manual intervention to the
GEN8_RING_SEMAPHORE_INIT macro, which references the local variable
name directly and therefore has to be converted to take the engine as
an explicit parameter.

@@
expression E;
@@
- struct intel_engine_cs *ring = E;
+ struct intel_engine_cs *engine = E;
<+...
- ring
+ engine
...+>
@@
@@
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
<+...
- ring
+ engine
...+>
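
(A semantic patch like this is normally applied with Coccinelle's spatch
tool; a minimal sketch, assuming the rules are saved as engine-rename.cocci,
would be:

    spatch --sp-file engine-rename.cocci --in-place --dir drivers/gpu/drm/i915

The file name and exact invocation are illustrative only and are not part of
the original commit.)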
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 08250c4b
......@@ -36,29 +36,30 @@ i915_verify_lists(struct drm_device *dev)
static int warned;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int err = 0;
int i;
if (warned)
return 0;
for_each_ring(ring, dev_priv, i) {
list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
for_each_ring(engine, dev_priv, i) {
list_for_each_entry(obj, &engine->active_list,
ring_list[engine->id]) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("%s: freed active obj %p\n",
ring->name, obj);
engine->name, obj);
err++;
break;
} else if (!obj->active ||
obj->last_read_req[ring->id] == NULL) {
obj->last_read_req[engine->id] == NULL) {
DRM_ERROR("%s: invalid active obj %p\n",
ring->name, obj);
engine->name, obj);
err++;
} else if (obj->base.write_domain) {
DRM_ERROR("%s: invalid write obj %p (w %x)\n",
ring->name,
engine->name,
obj, obj->base.write_domain);
err++;
}
......
......@@ -1095,7 +1095,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
......@@ -1122,7 +1122,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_private *dev_priv = to_i915(engine->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
......@@ -1146,11 +1146,11 @@ static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
if (!IS_GEN7(dev) || engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
......@@ -1160,12 +1160,12 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(engine, 0);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return 0;
}
......@@ -1229,7 +1229,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *ring = params->ring;
struct intel_engine_cs *engine = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_start, exec_len;
int instp_mode;
......@@ -1244,8 +1244,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
"%s didn't clear reload\n", engine->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
......@@ -1253,7 +1253,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
......@@ -1280,17 +1280,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
if (ring == &dev_priv->ring[RCS] &&
if (engine == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, INSTPM);
intel_ring_emit(engine, instp_mask << 16 | instp_mode);
intel_ring_advance(engine);
dev_priv->relative_constants_mode = instp_mode;
}
......@@ -1308,7 +1308,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
ret = ring->dispatch_execbuffer(params->request,
ret = engine->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
if (ret)
......@@ -1432,7 +1432,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
......@@ -1459,7 +1459,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
ret = eb_select_ring(dev_priv, file, args, &ring);
ret = eb_select_ring(dev_priv, file, args, &engine);
if (ret)
return ret;
......@@ -1473,9 +1473,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
if (ring->id != RCS) {
if (engine->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
ring->name);
engine->name);
return -EINVAL;
}
......@@ -1488,7 +1488,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
......@@ -1522,7 +1522,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
&need_relocs);
if (ret)
goto err;
......@@ -1531,7 +1532,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
engine,
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
......@@ -1547,16 +1549,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
params->args_batch_start_offset = args->batch_start_offset;
if (i915_needs_cmd_parser(ring) && args->batch_len) {
if (i915_needs_cmd_parser(engine) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
parsed_batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
parsed_batch_obj = i915_gem_execbuffer_parse(engine,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
......@@ -1608,7 +1610,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
req = i915_gem_request_alloc(ring, ctx);
req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
......@@ -1626,7 +1628,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
params->dev = dev;
params->file = file;
params->ring = ring;
params->ring = engine;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
......
......@@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
BUG_ON(entry >= 4);
......@@ -667,13 +667,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
intel_ring_emit(engine, upper_32_bits(addr));
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
intel_ring_emit(engine, lower_32_bits(addr));
intel_ring_advance(engine);
return 0;
}
......@@ -1650,11 +1650,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
......@@ -1662,13 +1662,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
intel_ring_emit(engine, PP_DIR_DCLV_2G);
intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
intel_ring_emit(engine, get_pd_offset(ppgtt));
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
return 0;
}
......@@ -1676,22 +1676,22 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
......@@ -1699,17 +1699,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
intel_ring_emit(engine, PP_DIR_DCLV_2G);
intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
intel_ring_emit(engine, get_pd_offset(ppgtt));
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (ring->id != RCS) {
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (engine->id != RCS) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
......@@ -1720,15 +1720,15 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
POSTING_READ(RING_PP_DIR_DCLV(ring));
POSTING_READ(RING_PP_DIR_DCLV(engine));
return 0;
}
......@@ -1736,12 +1736,12 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int j;
for_each_ring(ring, dev_priv, j) {
for_each_ring(engine, dev_priv, j) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(ring),
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
......@@ -1749,7 +1749,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
int i;
......@@ -1765,9 +1765,9 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(ring),
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
......@@ -2286,15 +2286,15 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
if (INTEL_INFO(dev)->gen < 6)
return;
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(ring));
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
......@@ -2305,7 +2305,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
I915_WRITE(RING_FAULT_REG(ring),
I915_WRITE(RING_FAULT_REG(engine),
fault_reg & ~RING_FAULT_VALID);
}
}
......
......@@ -495,9 +495,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (obj) {
u64 wa_ctx_offset = obj->gtt_offset;
u32 *wa_ctx_page = &obj->pages[0][0];
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
u32 wa_ctx_size = (ring->wa_ctx.indirect_ctx.size +
ring->wa_ctx.per_ctx.size);
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
engine->wa_ctx.per_ctx.size);
err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
dev_priv->ring[i].name, wa_ctx_offset);
......@@ -1019,19 +1019,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_engine_cs *engine = &dev_priv->ring[i];
struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1;
if (ring->dev == NULL)
if (engine->dev == NULL)
continue;
error->ring[i].valid = true;
i915_record_ring_state(dev, error, ring, &error->ring[i]);
i915_record_ring_state(dev, error, engine, &error->ring[i]);
request = i915_gem_find_active_request(ring);
request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
......@@ -1051,7 +1051,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
if (HAS_BROKEN_CS_TLB(dev_priv->dev))
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
engine->scratch.obj);
if (request->pid) {
struct task_struct *task;
......@@ -1073,11 +1073,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
* executed).
*/
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
rbuf = request->ctx->engine[engine->id].ringbuf;
else
rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
} else
rbuf = ring->buffer;
rbuf = engine->buffer;
error->ring[i].cpu_ring_head = rbuf->head;
error->ring[i].cpu_ring_tail = rbuf->tail;
......@@ -1086,18 +1086,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_ggtt_object_create(dev_priv, rbuf->obj);
error->ring[i].hws_page =
i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
i915_error_ggtt_object_create(dev_priv,
engine->status_page.obj);
if (ring->wa_ctx.obj) {
if (engine->wa_ctx.obj) {
error->ring[i].wa_ctx =
i915_error_ggtt_object_create(dev_priv,
ring->wa_ctx.obj);
engine->wa_ctx.obj);
}
i915_gem_record_active_context(ring, error, &error->ring[i]);
i915_gem_record_active_context(engine, error, &error->ring[i]);
count = 0;
list_for_each_entry(request, &ring->request_list, list)
list_for_each_entry(request, &engine->request_list, list)
count++;
error->ring[i].num_requests = count;
......@@ -1110,7 +1111,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
count = 0;
list_for_each_entry(request, &ring->request_list, list) {
list_for_each_entry(request, &engine->request_list, list) {
struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {
......
......@@ -377,7 +377,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
......@@ -390,8 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
for_each_ring(ring, dev_priv, i) {
struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
for_each_ring(engine, dev_priv, i) {
struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
......@@ -406,14 +406,14 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
if (!obj)
break; /* XXX: continue? */
ctx_desc = intel_lr_context_descriptor(ctx, ring);
ctx_desc = intel_lr_context_descriptor(ctx, engine);
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
obj = ctx->engine[i].ringbuf->obj;
......@@ -422,7 +422,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
desc.engines_used |= (1 << ring->guc_id);
desc.engines_used |= (1 << engine->guc_id);
}
WARN_ON(desc.engines_used == 0);
......@@ -839,7 +839,7 @@ static void guc_create_ads(struct intel_guc *guc)
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct page *page;
u32 size, i;
......@@ -867,11 +867,11 @@ static void guc_create_ads(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
ring = &dev_priv->ring[RCS];
ads->golden_context_lrca = ring->status_page.gfx_addr;
engine = &dev_priv->ring[RCS];
ads->golden_context_lrca = engine->status_page.gfx_addr;
for_each_ring(ring, dev_priv, i)
ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
for_each_ring(engine, dev_priv, i)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
......@@ -883,12 +883,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
for_each_ring(ring, dev_priv, i) {
reg_state->mmio_white_list[ring->guc_id].mmio_start =
ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
for_each_ring(engine, dev_priv, i) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[ring->guc_id].count = 0;
reg_state->mmio_white_list[engine->guc_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
......
......@@ -1079,11 +1079,11 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i)
if (ring->irq_refcount)
for_each_ring(engine, dev_priv, i)
if (engine->irq_refcount)
return true;
return false;
......@@ -2449,7 +2449,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
/*
......@@ -2460,8 +2460,8 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
for_each_ring(engine, dev_priv, i)
wake_up_all(&engine->irq_queue);
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
......@@ -2956,11 +2956,11 @@ static int semaphore_passed(struct intel_engine_cs *ring)
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i)
ring->hangcheck.deadlock = 0;
for_each_ring(engine, dev_priv, i)
engine->hangcheck.deadlock = 0;
}
static bool subunits_stuck(struct intel_engine_cs *ring)
......@@ -3071,7 +3071,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
......@@ -3096,33 +3096,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
u64 acthd;
u32 seqno;
bool busy = true;
semaphore_clear_deadlocks(dev_priv);
seqno = ring->get_seqno(ring, false);
acthd = intel_ring_get_active_head(ring);
seqno = engine->get_seqno(engine, false);
acthd = intel_ring_get_active_head(engine);
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (engine->hangcheck.seqno == seqno) {
if (ring_idle(engine, seqno)) {
engine->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
if (waitqueue_active(&engine->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
ring->name);
engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
ring->name);
wake_up_all(&ring->irq_queue);
engine->name);
wake_up_all(&engine->irq_queue);
}
/* Safeguard against driver failure */
ring->hangcheck.score += BUSY;
engine->hangcheck.score += BUSY;
} else
busy = false;
} else {
......@@ -3141,53 +3141,53 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
ring->hangcheck.action = ring_stuck(ring,
acthd);
engine->hangcheck.action = ring_stuck(engine,
acthd);
switch (ring->hangcheck.action) {
switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
ring->hangcheck.score += BUSY;
engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
ring->hangcheck.score += KICK;
engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
ring->hangcheck.score += HUNG;
engine->hangcheck.score += HUNG;
stuck[i] = true;
break;
}
}
} else {
ring->hangcheck.action = HANGCHECK_ACTIVE;
engine->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score -= ACTIVE_DECAY;
if (ring->hangcheck.score < 0)
ring->hangcheck.score = 0;
if (engine->hangcheck.score > 0)
engine->hangcheck.score -= ACTIVE_DECAY;
if (engine->hangcheck.score < 0)
engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
ring->hangcheck.acthd = 0;
engine->hangcheck.acthd = 0;
memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
memset(engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
}
ring->hangcheck.seqno = seqno;
ring->hangcheck.acthd = acthd;
engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd;
busy_count += busy;
}
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
for_each_ring(engine, dev_priv, i) {
if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
engine->name);
rings_hung++;
}
}
......
......@@ -10984,7 +10984,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
......@@ -11000,13 +11000,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_ring_emit(engine, fb->pitches[0]);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
......@@ -11019,7 +11019,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
......@@ -11032,13 +11032,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, fb->pitches[0]);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, MI_NOOP);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
......@@ -11051,7 +11051,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
......@@ -11065,10 +11065,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
intel_ring_emit(ring, MI_DISPLAY_FLIP |
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
intel_ring_emit(engine, fb->pitches[0]);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
......@@ -11077,7 +11077,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
......@@ -11090,7 +11090,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
......@@ -11100,10 +11100,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
if (ret)
return ret;
intel_ring_emit(ring, MI_DISPLAY_FLIP |
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
......@@ -11113,7 +11113,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
......@@ -11126,7 +11126,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
......@@ -11147,7 +11147,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
}
len = 4;
if (ring->id == RCS) {
if (engine->id == RCS) {
len += 6;
/*
* On Gen 8, SRM is now taking an extra dword to accommodate
......@@ -11185,30 +11185,30 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (ring->id == RCS) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (engine->id == RCS) {
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, DERRMR);
intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_emit_reg(engine, DERRMR);
intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, 0);
intel_ring_emit(engine, MI_NOOP);
}
}
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
......@@ -11488,7 +11488,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
int ret;
......@@ -11575,21 +11575,21 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
engine = &dev_priv->ring[BCS];
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
engine = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
engine = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = i915_gem_request_get_ring(obj->last_write_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
engine = i915_gem_request_get_ring(obj->last_write_req);
if (engine == NULL || engine->id != RCS)
engine = &dev_priv->ring[BCS];
} else {
ring = &dev_priv->ring[RCS];
engine = &dev_priv->ring[RCS];
}
mmio_flip = use_mmio_flip(ring, obj);
mmio_flip = use_mmio_flip(engine, obj);
/* When using CS flips, we want to emit semaphores between rings.
* However, when using mmio flips we will create a task to do the
......@@ -11597,7 +11597,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* into the display plane and skip any waits.
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, ring, &request);
ret = i915_gem_object_sync(obj, engine, &request);
if (ret)
goto cleanup_pending;
}
......@@ -11619,7 +11619,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj->last_write_req);
} else {
if (!request) {
request = i915_gem_request_alloc(ring, NULL);
request = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
......
......@@ -81,14 +81,14 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i, irqs;
/* tell all command streamers NOT to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
......@@ -98,14 +98,14 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i, irqs;
/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
......
......@@ -324,11 +324,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
if (get_mocs_settings(req->ring->dev, &t)) {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
enum intel_ring_id ring_id;
/* Program the control registers */
for_each_ring(ring, dev_priv, ring_id) {
for_each_ring(engine, dev_priv, ring_id) {
ret = emit_mocs_control_table(req, &t, ring_id);
if (ret)
return ret;
......
......@@ -233,14 +233,14 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
......@@ -252,11 +252,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
......@@ -267,7 +267,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
......@@ -283,7 +283,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
......@@ -293,9 +293,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
return ret;
}
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(engine, flip_addr);
intel_ring_advance(engine);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
......@@ -336,7 +336,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
......@@ -349,7 +349,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
......@@ -360,22 +360,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
if (IS_I830(dev)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
} else {
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
......@@ -408,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
......@@ -423,7 +424,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
struct drm_i915_gem_request *req;
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
......@@ -433,9 +434,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return ret;
}
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
......
......@@ -4815,7 +4815,7 @@ static void gen9_enable_rps(struct drm_device *dev)
static void gen9_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;
......@@ -4838,8 +4838,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
......@@ -4885,7 +4885,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;
......@@ -4906,8 +4906,8 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
......@@ -4967,7 +4967,7 @@ static void gen8_enable_rps(struct drm_device *dev)
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
......@@ -5003,8 +5003,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
......@@ -5495,7 +5495,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
static void cherryview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
int i;
......@@ -5522,8 +5522,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
......@@ -5593,7 +5593,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
......@@ -5633,8 +5633,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
......@@ -6010,7 +6010,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
bool ret = false;
int i;
......@@ -6019,8 +6019,8 @@ bool i915_gpu_busy(void)
goto out_unlock;
dev_priv = i915_mch_dev;
for_each_ring(ring, dev_priv, i)
ret |= !list_empty(&ring->request_list);
for_each_ring(engine, dev_priv, i)
ret |= !list_empty(&engine->request_list);
out_unlock:
spin_unlock_irq(&mchdev_lock);
......
......@@ -63,16 +63,16 @@ struct intel_hw_status_page {
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))
#define GEN8_RING_SEMAPHORE_INIT do { \
#define GEN8_RING_SEMAPHORE_INIT(e) do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
(e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
(e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
enum intel_ring_hangcheck_action {
......
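The hunk above is the manual intervention called out in the commit message:
GEN8_RING_SEMAPHORE_INIT previously picked up the local variable `ring`
implicitly from the enclosing scope, which a semantic patch on variable
declarations cannot rename, so the macro now takes the engine as an explicit
parameter. A call site would change roughly as follows (hypothetical
illustration, not part of this diff):

-	GEN8_RING_SEMAPHORE_INIT;
+	GEN8_RING_SEMAPHORE_INIT(engine);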