Commit 500bfa38 authored by Tvrtko Ursulin

drm/i915: Convert i915_gem_init_swizzling to intel_gt

Start using the newly introduced struct intel_gt to tie the correct
logical init flow together with uncore, continuing the removal of
implicit dev_priv use from mmio access.

v2:
 * Move code to i915_gem_fence_reg. (Chris)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-7-tvrtko.ursulin@linux.intel.com
parent eaf522f6
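
For context, a minimal sketch (not part of the patch) of the conversion pattern this series applies: the legacy I915_READ/I915_WRITE macros rely on an implicit dev_priv in scope, while struct intel_gt carries an explicit struct intel_uncore so every register access names its target. EXAMPLE_REG and EXAMPLE_BIT below are hypothetical placeholders, not real i915 registers.

/* Before: the mmio macros pick up dev_priv implicitly from the caller. */
static void legacy_style(struct drm_i915_private *dev_priv)
{
	I915_WRITE(EXAMPLE_REG, I915_READ(EXAMPLE_REG) | EXAMPLE_BIT);
}

/* After: the gt bundles the device with its uncore for explicit mmio. */
static void gt_style(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, EXAMPLE_REG,
			   intel_uncore_read(uncore, EXAMPLE_REG) | EXAMPLE_BIT);
}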
@@ -2957,7 +2957,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 		intel_uc_resume(dev_priv);
 
-		i915_gem_init_swizzling(dev_priv);
+		intel_gt_init_swizzling(&dev_priv->gt);
 		i915_gem_restore_fences(dev_priv);
 
 		enable_rpm_wakeref_asserts(rpm);
@@ -3059,7 +3059,7 @@ static int intel_runtime_resume(struct device *kdev)
 	 * No point of rolling back things in case of an error, as the best
 	 * we can do is to hope that things will still work (and disable RPM).
 	 */
-	i915_gem_init_swizzling(dev_priv);
+	intel_gt_init_swizzling(&dev_priv->gt);
 	i915_gem_restore_fences(dev_priv);
 
 	/*
...
@@ -2537,7 +2537,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
 void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
 void i915_gem_fini(struct drm_i915_private *dev_priv);
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
...
@@ -1202,29 +1202,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_GEN(dev_priv) < 5 ||
-	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
-		return;
-
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
-				 DISP_TILE_SURFACE_SWIZZLING);
-
-	if (IS_GEN(dev_priv, 5))
-		return;
-
-	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
-	if (IS_GEN(dev_priv, 6))
-		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-	else if (IS_GEN(dev_priv, 7))
-		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-	else if (IS_GEN(dev_priv, 8))
-		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
-	else
-		BUG();
-}
-
 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
 {
 	I915_WRITE(RING_CTL(base), 0);
@@ -1271,7 +1248,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	/* ...and determine whether they are sticking. */
 	intel_gt_verify_workarounds(dev_priv, "init");
 
-	i915_gem_init_swizzling(dev_priv);
+	intel_gt_init_swizzling(&dev_priv->gt);
 
 	/*
 	 * At least 830 can leave some of the unused rings
...
@@ -834,3 +834,40 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 
 	i915_gem_restore_fences(i915);
 }
+
+void intel_gt_init_swizzling(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+
+	if (INTEL_GEN(i915) < 5 ||
+	    i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+		return;
+
+	intel_uncore_write(uncore,
+			   DISP_ARB_CTL,
+			   intel_uncore_read(uncore, DISP_ARB_CTL) |
+			   DISP_TILE_SURFACE_SWIZZLING);
+
+	if (IS_GEN(i915, 5))
+		return;
+
+	intel_uncore_write(uncore,
+			   TILECTL,
+			   intel_uncore_read(uncore, TILECTL) | TILECTL_SWZCTL);
+
+	if (IS_GEN(i915, 6))
+		intel_uncore_write(uncore,
+				   ARB_MODE,
+				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+	else if (IS_GEN(i915, 7))
+		intel_uncore_write(uncore,
+				   ARB_MODE,
+				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else if (IS_GEN(i915, 8))
+		intel_uncore_write(uncore,
+				   GAMTARBMODE,
+				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+	else
+		MISSING_CASE(INTEL_GEN(i915));
+}
@@ -32,6 +32,7 @@ struct drm_i915_gem_object;
 struct drm_i915_private;
 struct i915_ggtt;
 struct i915_vma;
+struct intel_gt;
 struct sg_table;
 
 #define I965_FENCE_PAGE 4096UL
@@ -66,4 +67,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 
 void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
 
+void intel_gt_init_swizzling(struct intel_gt *gt);
+
 #endif