Commit e9d4c924 authored by Chris Wilson

drm/i915: Store i915_ggtt as the backpointer on fence registers

Now that i915_ggtt knows everything about its own paths to perform mmio,
we can use that as our primary backpointer for individual fence
registers. This reduces the amount of pointer dancing we have to perform
on the common paths, but more importantly finishes our fence register
encapsulation.
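
In concrete terms, the fence register no longer carries a struct
drm_i915_private backpointer that the common paths then chase back into
i915->ggtt; it carries the i915_ggtt directly and derives everything
else from it. A minimal sketch of the pattern (the two helpers are the
ones added by this patch; the struct is abbreviated to the fields
touched here):

	struct i915_fence_reg {
		struct list_head link;
		struct i915_ggtt *ggtt; /* was: struct drm_i915_private *i915 */
		struct i915_vma *vma;
		atomic_t pin_count;
		int id;
	};

	/* Device and mmio access both resolve through the one backpointer. */
	static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
	{
		return fence->ggtt->vm.i915;
	}

	static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
	{
		return fence->ggtt->vm.gt->uncore;
	}

On the common paths this turns, e.g.,
list_move(&fence->link, &fence->i915->ggtt.fence_list) into
list_move(&fence->link, &ggtt->fence_list).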
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191016143234.4075-1-chris@chris-wilson.co.uk
parent eca0b720
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -715,7 +715,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
 	for_each_engine(engine, gt->i915, id)
 		__intel_engine_reset(engine, stalled_mask & engine->mask);
 
-	i915_gem_restore_fences(gt->i915);
+	i915_gem_restore_fences(gt->ggtt);
 
 	return err;
 }
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -198,7 +198,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-		reg = i915_reserve_fence(dev_priv);
+		reg = i915_reserve_fence(&dev_priv->ggtt);
 		if (IS_ERR(reg))
 			goto out_free_fence;
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1804,7 +1804,7 @@ static int i915_drm_resume(struct drm_device *dev)
 		DRM_ERROR("failed to re-enable GGTT\n");
 
 	i915_gem_restore_gtt_mappings(dev_priv);
-	i915_gem_restore_fences(dev_priv);
+	i915_gem_restore_fences(&dev_priv->ggtt);
 
 	intel_csr_ucode_resume(dev_priv);
@@ -2502,7 +2502,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 		intel_gt_runtime_resume(&dev_priv->gt);
-		i915_gem_restore_fences(dev_priv);
+		i915_gem_restore_fences(&dev_priv->ggtt);
 
 		enable_rpm_wakeref_asserts(rpm);
@@ -2582,7 +2582,7 @@ static int intel_runtime_resume(struct device *kdev)
 	 * we can do is to hope that things will still work (and disable RPM).
	 */
 	intel_gt_runtime_resume(&dev_priv->gt);
-	i915_gem_restore_fences(dev_priv);
+	i915_gem_restore_fences(&dev_priv->ggtt);
 
 	/*
 	 * On VLV/CHV display interrupts are part of the display
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1360,7 +1360,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		/* Minimal basic recovery for KMS */
 		ret = i915_ggtt_enable_hw(dev_priv);
 		i915_gem_restore_gtt_mappings(dev_priv);
-		i915_gem_restore_fences(dev_priv);
+		i915_gem_restore_fences(&dev_priv->ggtt);
 		intel_init_clock_gating(dev_priv);
 	}
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -59,6 +59,16 @@
 
 #define pipelined 0
 
+static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
+{
+	return fence->ggtt->vm.i915;
+}
+
+static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
+{
+	return fence->ggtt->vm.gt->uncore;
+}
+
 static void i965_write_fence_reg(struct i915_fence_reg *fence,
 				 struct i915_vma *vma)
 {
@@ -66,7 +76,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
 	int fence_pitch_shift;
 	u64 val;
 
-	if (INTEL_GEN(fence->i915) >= 6) {
+	if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
 		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
 		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
 		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -95,7 +105,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
 	}
 
 	if (!pipelined) {
-		struct intel_uncore *uncore = &fence->i915->uncore;
+		struct intel_uncore *uncore = fence_to_uncore(fence);
 
 		/*
 		 * To w/a incoherency with non-atomic 64-bit register updates,
@@ -132,7 +142,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
 	GEM_BUG_ON(!is_power_of_2(vma->fence_size));
 	GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
-	if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
+	if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
 		stride /= 128;
 	else
 		stride /= 512;
@@ -148,7 +158,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
 	}
 
 	if (!pipelined) {
-		struct intel_uncore *uncore = &fence->i915->uncore;
+		struct intel_uncore *uncore = fence_to_uncore(fence);
 		i915_reg_t reg = FENCE_REG(fence->id);
 
 		intel_uncore_write_fw(uncore, reg, val);
@@ -180,7 +190,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
 	}
 
 	if (!pipelined) {
-		struct intel_uncore *uncore = &fence->i915->uncore;
+		struct intel_uncore *uncore = fence_to_uncore(fence);
 		i915_reg_t reg = FENCE_REG(fence->id);
 
 		intel_uncore_write_fw(uncore, reg, val);
@@ -191,15 +201,17 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
 static void fence_write(struct i915_fence_reg *fence,
 			struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = fence_to_i915(fence);
+
 	/*
 	 * Previous access through the fence register is marshalled by
 	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
 	 * and explicitly managed for internal users.
 	 */
 
-	if (IS_GEN(fence->i915, 2))
+	if (IS_GEN(i915, 2))
 		i830_write_fence_reg(fence, vma);
-	else if (IS_GEN(fence->i915, 3))
+	else if (IS_GEN(i915, 3))
 		i915_write_fence_reg(fence, vma);
 	else
 		i965_write_fence_reg(fence, vma);
@@ -215,6 +227,8 @@ static void fence_write(struct i915_fence_reg *fence,
 static int fence_update(struct i915_fence_reg *fence,
 			struct i915_vma *vma)
 {
+	struct i915_ggtt *ggtt = fence->ggtt;
+	struct intel_uncore *uncore = fence_to_uncore(fence);
 	intel_wakeref_t wakeref;
 	struct i915_vma *old;
 	int ret;
@@ -256,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence,
 			old->fence = NULL;
 		}
 
-		list_move(&fence->link, &fence->i915->ggtt.fence_list);
+		list_move(&fence->link, &ggtt->fence_list);
 	}
 
 	/*
@@ -269,7 +283,7 @@ static int fence_update(struct i915_fence_reg *fence,
 	 * be cleared before we can use any other fences to ensure that
 	 * the new fences do not overlap the elided clears, confusing HW.
 	 */
-	wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
+	wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
 	if (!wakeref) {
 		GEM_BUG_ON(vma);
 		return 0;
@@ -280,10 +294,10 @@ static int fence_update(struct i915_fence_reg *fence,
 
 	if (vma) {
 		vma->fence = fence;
-		list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
+		list_move_tail(&fence->link, &ggtt->fence_list);
 	}
 
-	intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
+	intel_runtime_pm_put(uncore->rpm, wakeref);
 	return 0;
 }
@@ -312,11 +326,11 @@ int i915_vma_revoke_fence(struct i915_vma *vma)
 	return fence_update(fence, NULL);
 }
 
-static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
+static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
 {
 	struct i915_fence_reg *fence;
 
-	list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
+	list_for_each_entry(fence, &ggtt->fence_list, link) {
 		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
 
 		if (atomic_read(&fence->pin_count))
@@ -326,7 +340,7 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
 	}
 
 	/* Wait for completion of pending flips which consume fences */
-	if (intel_has_pending_fb_unpin(i915))
+	if (intel_has_pending_fb_unpin(ggtt->vm.i915))
 		return ERR_PTR(-EAGAIN);
 
 	return ERR_PTR(-EDEADLK);
@@ -351,7 +365,7 @@ int __i915_vma_pin_fence(struct i915_vma *vma)
 			return 0;
 		}
 	} else if (set) {
-		fence = fence_find(vma->vm->i915);
+		fence = fence_find(ggtt);
 		if (IS_ERR(fence))
 			return PTR_ERR(fence);
@@ -402,7 +416,7 @@ int i915_vma_pin_fence(struct i915_vma *vma)
 	 * Note that we revoke fences on runtime suspend. Therefore the user
 	 * must keep the device awake whilst using the fence.
 	 */
-	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+	assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -418,14 +432,13 @@ int i915_vma_pin_fence(struct i915_vma *vma)
 /**
  * i915_reserve_fence - Reserve a fence for vGPU
- * @i915: i915 device private
+ * @ggtt: Global GTT
  *
  * This function walks the fence regs looking for a free one and remove
 * it from the fence_list. It is used to reserve fence for vGPU to use.
  */
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
 {
-	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_fence_reg *fence;
 	int count;
 	int ret;
@@ -439,7 +452,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
 	if (count <= 1)
 		return ERR_PTR(-ENOSPC);
 
-	fence = fence_find(i915);
+	fence = fence_find(ggtt);
 	if (IS_ERR(fence))
 		return fence;
@@ -463,7 +476,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
  */
 void i915_unreserve_fence(struct i915_fence_reg *fence)
 {
-	struct i915_ggtt *ggtt = &fence->i915->ggtt;
+	struct i915_ggtt *ggtt = fence->ggtt;
 
 	lockdep_assert_held(&ggtt->vm.mutex);
@@ -472,19 +485,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
 /**
  * i915_gem_restore_fences - restore fence state
- * @i915: i915 device private
+ * @ggtt: Global GTT
  *
  * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
  */
-void i915_gem_restore_fences(struct drm_i915_private *i915)
+void i915_gem_restore_fences(struct i915_ggtt *ggtt)
 {
 	int i;
 
 	rcu_read_lock(); /* keep obj alive as we dereference */
-	for (i = 0; i < i915->ggtt.num_fences; i++) {
-		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+	for (i = 0; i < ggtt->num_fences; i++) {
+		struct i915_fence_reg *reg = &ggtt->fence_regs[i];
 		struct i915_vma *vma = READ_ONCE(reg->vma);
 
 		GEM_BUG_ON(vma && vma->fence != reg);
@@ -550,7 +563,7 @@ void i915_gem_restore_fences(struct drm_i915_private *i915)
  */
 
 /**
- * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * detect_bit_6_swizzle - detect bit 6 swizzling pattern
  * @i915: i915 device private
  *
  * Detects bit 6 swizzling of address lookup between IGD access and CPU
@@ -822,12 +835,13 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 {
 	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 	int num_fences;
 	int i;
 
 	INIT_LIST_HEAD(&ggtt->fence_list);
 	INIT_LIST_HEAD(&ggtt->userfault_list);
-	intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);
+	intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
 
 	detect_bit_6_swizzle(i915);
@@ -842,20 +856,20 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 		num_fences = 8;
 
 	if (intel_vgpu_active(i915))
-		num_fences = intel_uncore_read(&i915->uncore,
+		num_fences = intel_uncore_read(uncore,
 					       vgtif_reg(avail_rs.fence_num));
 
 	/* Initialize fence registers to zero */
 	for (i = 0; i < num_fences; i++) {
 		struct i915_fence_reg *fence = &ggtt->fence_regs[i];
 
-		fence->i915 = i915;
+		fence->ggtt = ggtt;
 		fence->id = i;
 		list_add_tail(&fence->link, &ggtt->fence_list);
 	}
 	ggtt->num_fences = num_fences;
 
-	i915_gem_restore_fences(i915);
+	i915_gem_restore_fences(ggtt);
 }
 
 void intel_gt_init_swizzling(struct intel_gt *gt)
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -29,7 +29,6 @@
 #include <linux/types.h>
 
 struct drm_i915_gem_object;
-struct drm_i915_private;
 struct i915_ggtt;
 struct i915_vma;
 struct intel_gt;
@@ -39,7 +38,7 @@ struct sg_table;
 struct i915_fence_reg {
 	struct list_head link;
-	struct drm_i915_private *i915;
+	struct i915_ggtt *ggtt;
 	struct i915_vma *vma;
 	atomic_t pin_count;
 	int id;
@@ -55,10 +54,10 @@ struct i915_fence_reg {
 };
 
 /* i915_gem_fence_reg.c */
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
 void i915_unreserve_fence(struct i915_fence_reg *fence);
 
-void i915_gem_restore_fences(struct drm_i915_private *i915);
+void i915_gem_restore_fences(struct i915_ggtt *ggtt);
 
 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
 				       struct sg_table *pages);
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -120,7 +120,7 @@ static void pm_resume(struct drm_i915_private *i915)
 		i915_gem_sanitize(i915);
 
 		i915_gem_restore_gtt_mappings(i915);
-		i915_gem_restore_fences(i915);
+		i915_gem_restore_fences(&i915->ggtt);
 
 		i915_gem_resume(i915);
 	}