Commit 75d04a37 authored by Mika Kuoppala's avatar Mika Kuoppala Committed by Jani Nikula

drm/i915/gtt: Allocate va range only if vma is not bound

When we have bound vma into an address space, the layout
of page table structures is immutable. So we can be absolutely
certain that if vma is already bound, there is no need to
(re)allocate a virtual address range for it.

v2: - add sanity checks and remove superfluous GLOBAL_BIND set
    - we might do update for an unbound vma (Chris)

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90224
Testcase: igt/gem_exec_big #bdw
Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent 245054a1
...@@ -1928,8 +1928,6 @@ static int ggtt_bind_vma(struct i915_vma *vma, ...@@ -1928,8 +1928,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, pages, vma->vm->insert_entries(vma->vm, pages,
vma->node.start, vma->node.start,
cache_level, pte_flags); cache_level, pte_flags);
vma->bound |= GLOBAL_BIND;
} }
if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
...@@ -2804,21 +2802,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) ...@@ -2804,21 +2802,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
int ret = 0; int ret;
u32 bind_flags = 0; u32 bind_flags;
if (vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma->vm, vma->node.start,
vma->node.size,
VM_TO_TRACE_NAME(vma->vm));
ret = vma->vm->allocate_va_range(vma->vm, if (WARN_ON(flags == 0))
vma->node.start, return -EINVAL;
vma->node.size);
if (ret)
return ret;
}
bind_flags = 0;
if (flags & PIN_GLOBAL) if (flags & PIN_GLOBAL)
bind_flags |= GLOBAL_BIND; bind_flags |= GLOBAL_BIND;
if (flags & PIN_USER) if (flags & PIN_USER)
...@@ -2829,7 +2819,22 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, ...@@ -2829,7 +2819,22 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
else else
bind_flags &= ~vma->bound; bind_flags &= ~vma->bound;
if (bind_flags) if (bind_flags == 0)
return 0;
if (vma->bound == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma->vm,
vma->node.start,
vma->node.size,
VM_TO_TRACE_NAME(vma->vm));
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
if (ret)
return ret;
}
ret = vma->vm->bind_vma(vma, cache_level, bind_flags); ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret) if (ret)
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment