Commit 0875546c authored by Daniel Vetter

drm/i915: Fix up the vma aliasing ppgtt binding

Currently we have the problem that the decision whether ptes need to
be (re)written is splattered all over the codebase. Move all that into
i915_vma_bind. This needs a few changes:
- Just reuse the PIN_* flags for i915_vma_bind and do the conversion
  to vma->bound in there to avoid duplicating the conversion code all
  over.
- We need to make binding for EXECBUF (i.e. pick aliasing ppgtt if
  around) explicit, add PIN_USER for that.
- Two callers want to update ptes, give them a PIN_UPDATE for that.

Of course we still want to avoid double-binding, but that should be
taken care of:
- A ppgtt vma will only ever see PIN_USER, so no issue with
  double-binding.
- A ggtt vma with aliasing ppgtt needs both types of binding, and we
  track that properly now.
- A ggtt vma without aliasing ppgtt could be bound twice. In the
  lower-level ->bind_vma functions hence unconditionally set
  GLOBAL_BIND when writing the ggtt ptes.

There's still a bit room for cleanup, but that's for follow-up
patches.

v2: Fixup fumbles.

v3: s/PIN_EXECBUF/PIN_USER/ for clearer meaning, suggested by Chris.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parent cd102a68
...@@ -2645,10 +2645,13 @@ void i915_init_vm(struct drm_i915_private *dev_priv, ...@@ -2645,10 +2645,13 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
void i915_gem_free_object(struct drm_gem_object *obj); void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma); void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_MAPPABLE 0x1 /* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK 0x2 #define PIN_MAPPABLE (1<<0)
#define PIN_GLOBAL 0x4 #define PIN_NONBLOCK (1<<1)
#define PIN_OFFSET_BIAS 0x8 #define PIN_GLOBAL (1<<2)
#define PIN_OFFSET_BIAS (1<<3)
#define PIN_USER (1<<4)
#define PIN_UPDATE (1<<5)
#define PIN_OFFSET_MASK (~4095) #define PIN_OFFSET_MASK (~4095)
int __must_check int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj, i915_gem_object_pin(struct drm_i915_gem_object *obj,
......
...@@ -3589,8 +3589,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, ...@@ -3589,8 +3589,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
goto err_remove_node; goto err_remove_node;
trace_i915_vma_bind(vma, flags); trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level, ret = i915_vma_bind(vma, obj->cache_level, flags);
flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
if (ret) if (ret)
goto err_finish_gtt; goto err_finish_gtt;
...@@ -3816,7 +3815,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -3816,7 +3815,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link) list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node)) { if (drm_mm_node_allocated(&vma->node)) {
ret = i915_vma_bind(vma, cache_level, ret = i915_vma_bind(vma, cache_level,
vma->bound & GLOBAL_BIND); PIN_UPDATE);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -4201,10 +4200,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, ...@@ -4201,10 +4200,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
flags); flags);
if (IS_ERR(vma)) if (IS_ERR(vma))
return PTR_ERR(vma); return PTR_ERR(vma);
} } else {
ret = i915_vma_bind(vma, obj->cache_level, flags);
if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -400,10 +400,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, ...@@ -400,10 +400,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* pipe_control writes because the gpu doesn't properly redirect them * pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */ * through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) && if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
!(target_vma->bound & GLOBAL_BIND))) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level, ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
GLOBAL_BIND); PIN_GLOBAL);
if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!")) if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
return ret; return ret;
} }
...@@ -585,7 +584,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, ...@@ -585,7 +584,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
uint64_t flags; uint64_t flags;
int ret; int ret;
flags = 0; flags = PIN_USER;
if (entry->flags & EXEC_OBJECT_NEEDS_GTT) if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL; flags |= PIN_GLOBAL;
......
...@@ -1748,15 +1748,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) ...@@ -1748,15 +1748,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
continue; continue;
i915_gem_clflush_object(obj, obj->pin_display); i915_gem_clflush_object(obj, obj->pin_display);
/* The bind_vma code tries to be smart about tracking mappings. WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*
* Bind is not expected to fail since this is only called on
* resume and assumption is all requirements exist already.
*/
vma->bound &= ~GLOBAL_BIND;
WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
} }
...@@ -1957,7 +1949,8 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma, ...@@ -1957,7 +1949,8 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
BUG_ON(!i915_is_ggtt(vma->vm)); BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags); intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
vma->bound = GLOBAL_BIND;
vma->bound |= GLOBAL_BIND;
} }
static void i915_ggtt_clear_range(struct i915_address_space *vm, static void i915_ggtt_clear_range(struct i915_address_space *vm,
...@@ -1976,7 +1969,6 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma) ...@@ -1976,7 +1969,6 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
BUG_ON(!i915_is_ggtt(vma->vm)); BUG_ON(!i915_is_ggtt(vma->vm));
vma->bound = 0;
intel_gtt_clear_range(first, size); intel_gtt_clear_range(first, size);
} }
...@@ -1997,35 +1989,19 @@ static void ggtt_bind_vma(struct i915_vma *vma, ...@@ -1997,35 +1989,19 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (i915_is_ggtt(vma->vm)) if (i915_is_ggtt(vma->vm))
pages = vma->ggtt_view.pages; pages = vma->ggtt_view.pages;
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
*
* If there is an aliasing PPGTT it is anecdotally faster, so use that
* instead if none of the above hold true.
*
* NB: A global mapping should only be needed for special regions like
* "gtt mappable", SNB errata, or if specified via special execbuf
* flags. At all other times, the GPU will use the aliasing PPGTT.
*/
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) || vma->vm->insert_entries(vma->vm, pages,
(cache_level != obj->cache_level)) { vma->node.start,
vma->vm->insert_entries(vma->vm, pages, cache_level, pte_flags);
vma->node.start,
cache_level, pte_flags); vma->bound |= GLOBAL_BIND;
vma->bound |= GLOBAL_BIND;
}
} }
if (dev_priv->mm.aliasing_ppgtt && if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
(!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base, pages, appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start, vma->node.start,
cache_level, pte_flags); cache_level, pte_flags);
vma->bound |= LOCAL_BIND;
} }
} }
...@@ -2040,16 +2016,14 @@ static void ggtt_unbind_vma(struct i915_vma *vma) ...@@ -2040,16 +2016,14 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
vma->node.start, vma->node.start,
obj->base.size, obj->base.size,
true); true);
vma->bound &= ~GLOBAL_BIND;
} }
if (vma->bound & LOCAL_BIND) { if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base, appgtt->base.clear_range(&appgtt->base,
vma->node.start, vma->node.start,
obj->base.size, obj->base.size,
true); true);
vma->bound &= ~LOCAL_BIND;
} }
} }
...@@ -2839,6 +2813,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) ...@@ -2839,6 +2813,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
u32 bind_flags = 0;
int ret; int ret;
if (vma->vm->allocate_va_range) { if (vma->vm->allocate_va_range) {
...@@ -2855,12 +2830,24 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, ...@@ -2855,12 +2830,24 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (i915_is_ggtt(vma->vm)) { if (i915_is_ggtt(vma->vm)) {
ret = i915_get_ggtt_vma_pages(vma); ret = i915_get_ggtt_vma_pages(vma);
if (ret) if (ret)
return ret; return 0;
} }
vma->vm->bind_vma(vma, cache_level, flags); if (flags & PIN_GLOBAL)
bind_flags |= GLOBAL_BIND;
if (flags & PIN_USER)
bind_flags |= LOCAL_BIND;
if (flags & PIN_UPDATE)
bind_flags |= vma->bound;
else
bind_flags &= ~vma->bound;
if (bind_flags)
vma->vm->bind_vma(vma, cache_level, bind_flags);
vma->bound |= bind_flags;
return 0; return 0;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment