Commit 9258811c authored by Daniel Vetter

drm/i915: Don't use atomics for pd_dirty_rings

It's already protected by the bkl^Wdev->struct_mutex. While at it
realign some related code.
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parent 71b7e54f
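
The change replaces the atomic bitops (test_bit(), clear_bit(), test_and_clear_bit()) on pd_dirty_rings with plain bitwise arithmetic, which is sound because every access to the mask already happens under dev->struct_mutex. A minimal sketch of that pattern, with made-up names (ctx_state, dirty_rings, ring_flag) standing in for the driver's structures and the caller assumed to hold the mutex:

/* Sketch only: ctx_state, dirty_rings and ring_flag are illustrative
 * names, not the driver's; the caller is assumed to hold
 * dev->struct_mutex around every call.
 */
struct ctx_state {
	unsigned long dirty_rings;	/* one bit per ring */
};

/* Before: atomic read-modify-write via the <linux/bitops.h> helpers. */
static bool reload_needed_atomic(struct ctx_state *s, int ring_id)
{
	return test_and_clear_bit(ring_id, &s->dirty_rings);
}

/* After: ordinary bit arithmetic; safe because the one lock that guards
 * dirty_rings already serializes all readers and writers.
 */
static bool reload_needed_locked(struct ctx_state *s, unsigned long ring_flag)
{
	if (!(s->dirty_rings & ring_flag))
		return false;
	s->dirty_rings &= ~ring_flag;
	return true;
}

Nothing about the locking changes; the atomic helpers were simply stronger than the single-lock usage needs.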

--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -578,11 +578,9 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
 	if (to->remap_slice)
 		return false;
 
-	if (to->ppgtt) {
-		if (from == to && !test_bit(ring->id,
-				&to->ppgtt->pd_dirty_rings))
-			return true;
-	}
+	if (to->ppgtt && from == to &&
+	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+		return true;
 
 	return false;
 }
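
For reference, intel_ring_flag() in this version of the driver is just the ring id turned into a bit position (paraphrased from intel_ringbuffer.h), so the rewritten check looks at the same bit as the old test_bit(ring->id, ...), only without the atomic accessor:

/* Paraphrased reminder: the per-ring flag is a plain bit shift. */
static inline u32 intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}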

@@ -668,7 +666,7 @@ static int do_switch(struct intel_engine_cs *ring,
 			goto unpin_out;
 
 		/* Doing a PD load always reloads the page dirs */
-		clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
 	}
 
 	if (ring != &dev_priv->ring[RCS]) {

@@ -696,8 +694,10 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
 	} else if (to->ppgtt &&
-			test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
 		hw_flags |= MI_FORCE_RESTORE;
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+	}
 
 	/* We should never emit switch_mm more than once */
 	WARN_ON(needs_pd_load_pre(ring, to) &&
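
The new braces in the MI_FORCE_RESTORE branch exist because the single atomic test_and_clear_bit() becomes two plain statements: test the bit with a mask, then clear it with &=. Read side by side (again assuming dev->struct_mutex serializes every pd_dirty_rings user):

/* Old: one atomic read-modify-write. */
if (to->ppgtt && test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
	hw_flags |= MI_FORCE_RESTORE;

/* New: non-atomic test, then clear, both under dev->struct_mutex. */
if (to->ppgtt && (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
	hw_flags |= MI_FORCE_RESTORE;
	to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
}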

--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1248,8 +1248,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	if (ret)
 		goto error;
 
-	if (ctx->ppgtt)
-		WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-			"%s didn't clear reload\n", ring->name);
+	WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+	     "%s didn't clear reload\n", ring->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;