Commit de4e783a authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Tidy batch pool logic

Move the madvise logic out of the execbuffer main path into the
relatively rare allocation path, making the execbuffer manipulation less
fragile.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ed9ddd25
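
For context, a minimal caller-side sketch (not part of the commit) of the contract this change establishes: i915_gem_batch_pool_get() now hands back an object whose pages are already pinned and whose madvise state was set once at allocation, so the execbuffer path only has to drop the page pin when it is done. The wrapper name use_pooled_batch() is hypothetical; the i915 helpers are the ones touched in the diff below.

#include "i915_drv.h"	/* drm_i915_gem_object, batch pool and page-pin helpers */

/* Hypothetical caller, sketching the post-patch contract only. */
static int use_pooled_batch(struct i915_gem_batch_pool *pool, size_t batch_len)
{
	struct drm_i915_gem_object *shadow;

	/* Allocation path: get_pages() and madv = DONTNEED happen in here,
	 * not in the execbuffer path.  Pages come back pinned. */
	shadow = i915_gem_batch_pool_get(pool, batch_len);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* ... copy and validate the user batch into shadow here ... */

	/* The only per-execbuffer obligation left: drop the page pin. */
	i915_gem_object_unpin_pages(shadow);
	return 0;
}
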
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -869,6 +869,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 	    batch_len + batch_start_offset > src_obj->base.size)
 		return ERR_PTR(-E2BIG);
 
+	if (WARN_ON(dest_obj->pages_pin_count == 0))
+		return ERR_PTR(-ENODEV);
+
 	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
 	if (ret) {
 		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
@@ -882,13 +885,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 		goto unpin_src;
 	}
 
-	ret = i915_gem_object_get_pages(dest_obj);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
-		goto unmap_src;
-	}
-	i915_gem_object_pin_pages(dest_obj);
-
 	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
 	if (ret) {
 		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
@@ -898,7 +894,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 	dst = vmap_batch(dest_obj, 0, batch_len);
 	if (!dst) {
 		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-		i915_gem_object_unpin_pages(dest_obj);
 		ret = -ENOMEM;
 		goto unmap_src;
 	}
@@ -1129,7 +1124,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 	}
 
 	vunmap(batch_base);
-	i915_gem_object_unpin_pages(shadow_batch_obj);
 
 	return ret;
 }
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -67,25 +67,23 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 				       struct drm_i915_gem_object,
 				       batch_pool_list);
 
-		WARN_ON(obj->active);
-
-		list_del_init(&obj->batch_pool_list);
+		list_del(&obj->batch_pool_list);
 		drm_gem_object_unreference(&obj->base);
 	}
 }
 
 /**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
  * @pool: the batch buffer pool
  * @size: the minimum desired size of the returned buffer
  *
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
  *
  * Note: Callers must hold the struct_mutex
  *
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
  */
 struct drm_i915_gem_object *
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
@@ -97,8 +95,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
 	list_for_each_entry_safe(tmp, next,
 				 &pool->cache_list, batch_pool_list) {
-
 		if (tmp->active)
 			continue;
 
@@ -114,25 +111,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		 * but not 'too much' bigger. A better way to do this
 		 * might be to bucket the pool objects based on size.
 		 */
-		if (tmp->base.size >= size &&
-		    tmp->base.size <= (2 * size)) {
+		if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
 			obj = tmp;
 			break;
 		}
 	}
 
-	if (!obj) {
+	if (obj == NULL) {
+		int ret;
+
 		obj = i915_gem_alloc_object(pool->dev, size);
-		if (!obj)
+		if (obj == NULL)
 			return ERR_PTR(-ENOMEM);
 
-		list_add_tail(&obj->batch_pool_list, &pool->cache_list);
-	}
-	else
-		/* Keep list in LRU order */
-		list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+		ret = i915_gem_object_get_pages(obj);
+		if (ret)
+			return ERR_PTR(ret);
 
-	obj->madv = I915_MADV_WILLNEED;
+		obj->madv = I915_MADV_DONTNEED;
+	}
 
+	list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+	i915_gem_object_pin_pages(obj);
 	return obj;
 }
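
The reuse heuristic in the hunk above (take the first inactive buffer whose size falls in [size, 2*size], otherwise allocate) can be shown outside the kernel. The sketch below is illustrative only, with hypothetical pool_buf/pool_get names and plain calloc() in place of the GEM allocator; as the in-kernel comment notes, bucketing by size would be a better long-term fit.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct pool_buf {
	size_t size;
	bool active;		/* still in use, so not eligible for reuse */
	struct pool_buf *next;
};

/* Reuse the first inactive buffer in [size, 2 * size]; otherwise allocate.
 * The 2x cap stops a small request from hogging a much larger buffer. */
static struct pool_buf *pool_get(struct pool_buf **head, size_t size)
{
	struct pool_buf *buf;

	for (buf = *head; buf; buf = buf->next) {
		if (buf->active)
			continue;
		if (buf->size >= size && buf->size <= 2 * size)
			return buf;
	}

	buf = calloc(1, sizeof(*buf));	/* stands in for i915_gem_alloc_object() */
	if (!buf)
		return NULL;
	buf->size = size;
	buf->next = *head;
	*head = buf;
	return buf;
}
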
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,7 +37,6 @@
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
-#define  __EXEC_OBJECT_PURGEABLE (1<<27)
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
@@ -224,12 +223,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 		vma->pin_count--;
 
-	if (entry->flags & __EXEC_OBJECT_PURGEABLE)
-		obj->madv = I915_MADV_DONTNEED;
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
-			  __EXEC_OBJECT_HAS_PIN |
-			  __EXEC_OBJECT_PURGEABLE);
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static void eb_destroy(struct eb_vmas *eb)
@@ -1165,11 +1159,13 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	if (ret)
 		goto err;
 
+	i915_gem_object_unpin_pages(shadow_batch_obj);
+
 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
 	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
 	vma->exec_entry = shadow_exec_entry;
-	vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	drm_gem_object_reference(&shadow_batch_obj->base);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
@@ -1178,6 +1174,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	return shadow_batch_obj;
 
 err:
+	i915_gem_object_unpin_pages(shadow_batch_obj);
 	if (ret == -EACCES) /* unhandled chained batch */
 		return batch_obj;
 	else