Commit 516198d3 authored by Christian König, committed by Christian König

drm/i915: audit bo->resource usage v3

Make sure we can at least move and alloc TT objects without backing store.

v2: clear the tt object even when no resource is allocated.
v3: add Matthew's changes for i915 as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230124125726.13323-1-christian.koenig@amd.com
parent 51affef3
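
What follows is the i915 part of the audit: every path that dereferences bo->resource learns to cope with a NULL resource, which is the state a buffer object is left in after pipeline-gutting (purge or swap-out). As a rough, self-contained illustration of that invariant, the stand-alone sketch below uses simplified stand-in types (not the real TTM/i915 structures) to show the fallback: no resource means no backing store, and the SYSTEM domain is the closest description.

/* Simplified model only; types, names and values are stand-ins. */
#include <stddef.h>
#include <stdio.h>

enum mem_type { PL_SYSTEM, PL_TT, PL_VRAM };

struct resource { enum mem_type mem_type; };
struct buffer_object { struct resource *resource; /* NULL after purge/swap-out */ };

/* Closest description of the current domain when there is no backing store. */
static enum mem_type effective_mem_type(const struct buffer_object *bo)
{
        return bo->resource ? bo->resource->mem_type : PL_SYSTEM;
}

int main(void)
{
        struct resource vram = { .mem_type = PL_VRAM };
        struct buffer_object bound = { .resource = &vram };
        struct buffer_object gutted = { .resource = NULL };

        printf("bound: %d, gutted: %d\n",
               effective_mem_type(&bound), effective_mem_type(&gutted));
        return 0;
}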
@@ -271,8 +271,6 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 {
 	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
 					     bdev);
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->resource->mem_type);
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 	unsigned long ccs_pages = 0;
 	enum ttm_caching caching;
@@ -286,8 +284,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (!i915_tt)
 		return NULL;
 
-	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
-	    man->use_tt)
+	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+	    ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
 		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
 
 	caching = i915_ttm_select_tt_caching(obj);
@@ -1051,7 +1049,26 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	if (!i915_ttm_resource_mappable(bo->resource)) {
+	/*
+	 * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+	 * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+	 * far as doing a ttm_bo_move_null(), which should skip all the
+	 * other junk.
+	 */
+	if (!bo->resource) {
+		struct ttm_operation_ctx ctx = {
+			.interruptible = true,
+			.no_wait_gpu = true, /* should be idle already */
+		};
+
+		GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+		ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+		if (ret) {
+			dma_resv_unlock(bo->base.resv);
+			return VM_FAULT_SIGBUS;
+		}
+	} else if (!i915_ttm_resource_mappable(bo->resource)) {
 		int err = -ENODEV;
 		int i;
...
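
The two hunks above follow the same rule: i915_ttm_tt_create() only consults the resource manager's use_tt flag when a resource actually exists (and otherwise still requests zeroed pages for I915_BO_ALLOC_CPU_CLEAR objects), while the fault handler validates a resource-less object back into the SYSTEM placement before trying to map it. Below is a compilable toy model of the fault-path decision, using stand-in types rather than the real kernel calls.

/* Stand-alone model of the fault-path fallback; all names are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct bo {
        bool has_resource;   /* models bo->resource != NULL */
        bool tt_swapped;     /* models TTM_TT_FLAG_SWAPPED on bo->ttm */
        bool mappable;       /* models i915_ttm_resource_mappable() */
};

/* models ttm_bo_validate(bo, sys_placement, &ctx) */
static int validate_to_system(struct bo *bo)
{
        bo->has_resource = true;  /* a SYSTEM resource is (re)created */
        bo->tt_swapped = false;   /* pages are brought back in */
        bo->mappable = true;      /* system pages are CPU-mappable */
        return 0;
}

static int fault(struct bo *bo)
{
        if (!bo->has_resource) {
                /* no resource is only expected for a swapped-out tt */
                if (!bo->tt_swapped)
                        return -1; /* the GEM_BUG_ON in the hunk */
                if (validate_to_system(bo))
                        return -1; /* SIGBUS */
        } else if (!bo->mappable) {
                /* original path: migrate to a mappable placement */
        }
        return 0; /* safe to insert the PTEs */
}

int main(void)
{
        struct bo gutted = { .has_resource = false, .tt_swapped = true };

        printf("fault on gutted bo -> %d\n", fault(&gutted));
        return 0;
}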
@@ -103,7 +103,27 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 {
 	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 	unsigned int cache_level;
+	unsigned int mem_flags;
 	unsigned int i;
+	int mem_type;
+
+	/*
+	 * We might have been purged (or swapped out) if the resource is NULL,
+	 * in which case the SYSTEM placement is the closest match to describe
+	 * the current domain. If the object is ever used in this state then we
+	 * will require moving it again.
+	 */
+	if (!bo->resource) {
+		mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = I915_PL_SYSTEM;
+		cache_level = I915_CACHE_NONE;
+	} else {
+		mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+			I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = bo->resource->mem_type;
+		cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+						   bo->ttm);
+	}
 
 	/*
 	 * If object was moved to an allowable region, update the object
@@ -111,11 +131,11 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 	 * in an allowable region, it's evicted and we don't update the
 	 * object region.
 	 */
-	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+	if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
 		for (i = 0; i < obj->mm.n_placements; ++i) {
 			struct intel_memory_region *mr = obj->mm.placements[i];
 
-			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+			if (intel_region_to_ttm_type(mr) == mem_type &&
 			    mr != obj->mm.region) {
 				i915_gem_object_release_memory_region(obj);
 				i915_gem_object_init_memory_region(obj, mr);
@@ -125,12 +145,8 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 	}
 
 	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+	obj->mem_flags |= mem_flags;
 
-	obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
-		I915_BO_FLAG_STRUCT_PAGE;
-
-	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
-					   bo->ttm);
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 }
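
i915_ttm_adjust_gem_after_move() now derives mem_type, mem_flags and cache_level up front, so a purged or swapped-out object (NULL resource) is reported as SYSTEM / struct-page / uncached instead of dereferencing a NULL pointer further down. A minimal stand-alone sketch of that bookkeeping, again with illustrative types and flag values:

/* Illustrative model; enum values and flags are not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

enum pl { PL_SYSTEM, PL_TT, PL_LMEM };
enum { FLAG_STRUCT_PAGE = 1, FLAG_IOMEM = 2 };

struct res { enum pl mem_type; bool iomem; };

/* Mirrors the new !bo->resource branch: fall back to a SYSTEM description. */
static void describe(const struct res *res, enum pl *mem_type, int *mem_flags)
{
        if (!res) {
                *mem_type = PL_SYSTEM;
                *mem_flags = FLAG_STRUCT_PAGE;
        } else {
                *mem_type = res->mem_type;
                *mem_flags = res->iomem ? FLAG_IOMEM : FLAG_STRUCT_PAGE;
        }
}

int main(void)
{
        enum pl type;
        int flags;

        describe(NULL, &type, &flags); /* purged/swapped-out object */
        printf("type=%d flags=%d\n", type, flags);
        return 0;
}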
@@ -565,6 +581,32 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
 		return 0;
 	}
 
+	if (!bo->resource) {
+		if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+			hop->mem_type = TTM_PL_SYSTEM;
+			hop->flags = TTM_PL_FLAG_TEMPORARY;
+			return -EMULTIHOP;
+		}
+
+		/*
+		 * This is only reached when first creating the object, or if
+		 * the object was purged or swapped out (pipeline-gutting). For
+		 * the former we can safely skip all of the below since we are
+		 * only using a dummy SYSTEM placement here. And with the latter
+		 * we will always re-enter here with bo->resource set correctly
+		 * (as per the above), since this is part of a multi-hop
+		 * sequence, where at the end we can do the move for real.
+		 *
+		 * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+		 * which doesn't require any kind of move, so it should be safe
+		 * to skip all the below and call ttm_bo_move_null() here, where
+		 * the caller in __i915_ttm_get_pages() will take care of the
+		 * rest, since we should have a valid ttm_tt.
+		 */
+		ttm_bo_move_null(bo, dst_mem);
+		return 0;
+	}
+
 	ret = i915_ttm_move_notify(bo);
 	if (ret)
 		return ret;
...
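
The i915_ttm_move() hunk leans on TTM's multi-hop protocol: when a resource-less object is asked to move anywhere other than TTM_PL_SYSTEM, the hook fills in *hop and returns -EMULTIHOP, and TTM core is expected to bounce the object through that temporary placement and call the move hook again, this time with bo->resource set. The loop below is a rough, self-contained model of that retry dance; the types, the errno value and the caller are simplified assumptions, not the real TTM code paths.

#include <stdio.h>

enum pl { PL_SYSTEM, PL_TT, PL_VRAM };
#define EMULTIHOP 72 /* illustrative value for this model */

struct hop { enum pl mem_type; };
struct bo { int has_resource; enum pl cur; };

/* models the !bo->resource branch of the move hook */
static int move(struct bo *bo, enum pl dst, struct hop *hop)
{
        if (!bo->has_resource) {
                if (dst != PL_SYSTEM) {
                        hop->mem_type = PL_SYSTEM; /* bounce via SYSTEM first */
                        return -EMULTIHOP;
                }
                bo->has_resource = 1; /* like ttm_bo_move_null(): adopt dst as-is */
                bo->cur = PL_SYSTEM;
                return 0;
        }
        bo->cur = dst; /* the real move would happen here */
        return 0;
}

/* models the caller retrying the move after a multi-hop request */
static int handle_move(struct bo *bo, enum pl dst)
{
        struct hop hop;
        int ret = move(bo, dst, &hop);

        if (ret == -EMULTIHOP) {
                move(bo, hop.mem_type, &hop); /* first hop: SYSTEM */
                ret = move(bo, dst, &hop);    /* re-enter with a resource set */
        }
        return ret;
}

int main(void)
{
        struct bo gutted = { .has_resource = 0, .cur = PL_SYSTEM };

        printf("move gutted bo to VRAM -> %d (now in %d)\n",
               handle_move(&gutted, PL_VRAM), gutted.cur);
        return 0;
}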