Commit 3312a4ac authored by Matthew Auld

drm/i915/ttm: require mappable by default

On devices with non-mappable LMEM ensure we always allocate the pages
within the mappable portion. For now we assume that all LMEM buffers
will require CPU access, which is also in line with pretty much all
current kernel internal users. In the next patch we will introduce a new
flag to override this behaviour.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220225145502.331818-2-matthew.auld@intel.com
parent 235582ca
...@@ -130,6 +130,10 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr, ...@@ -130,6 +130,10 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
if (flags & I915_BO_ALLOC_CONTIGUOUS) if (flags & I915_BO_ALLOC_CONTIGUOUS)
place->flags = TTM_PL_FLAG_CONTIGUOUS; place->flags = TTM_PL_FLAG_CONTIGUOUS;
if (mr->io_size && mr->io_size < mr->total) {
place->fpfn = 0;
place->lpfn = mr->io_size >> PAGE_SHIFT;
}
} }
static void static void
......
...@@ -199,6 +199,11 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem, ...@@ -199,6 +199,11 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
struct ttm_resource *res; struct ttm_resource *res;
int ret; int ret;
if (mem->io_size && mem->io_size < mem->total) {
place.fpfn = 0;
place.lpfn = mem->io_size >> PAGE_SHIFT;
}
mock_bo.base.size = size; mock_bo.base.size = size;
mock_bo.bdev = &mem->i915->bdev; mock_bo.bdev = &mem->i915->bdev;
place.flags = flags; place.flags = flags;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment