Commit 8ee262ba authored by Matthew Auld, committed by Tvrtko Ursulin

drm/i915/ttm: add unmap_virtual callback

Ensure we call ttm_bo_unmap_virtual when releasing the pages.
Importantly, this should now handle the ttm swapping case, as well as
all the other places that already call into i915_ttm_move_notify().

v2: fix up the selftest

Fixes: cf3e3e86 ("drm/i915: Use ttm mmap handling for ttm bo's.")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220106174910.280616-3-matthew.auld@intel.com
(cherry picked from commit 903e0387)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
parent 03ee5956
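
In outline, the patch adds an optional unmap_virtual() hook to the gem
object-ops table and calls it from the generic mmap-offset release path, so
a backend that tracks CPU mappings elsewhere (TTM) can invalidate them. A
minimal standalone sketch of that pattern, using simplified stand-in types
rather than the real i915 definitions:

    /* Sketch of the optional-callback pattern the patch wires up;
     * gem_object/gem_object_ops are simplified stand-ins, not the
     * kernel's actual types. */
    struct gem_object;

    struct gem_object_ops {
            /* optional: zap any userspace CPU mappings of the object */
            void (*unmap_virtual)(struct gem_object *obj);
    };

    struct gem_object {
            const struct gem_object_ops *ops;
    };

    static void release_mmap_offset(struct gem_object *obj)
    {
            /* only backends that provide the hook are called */
            if (obj->ops->unmap_virtual)
                    obj->ops->unmap_virtual(obj);

            /* ... proceed to tear down the mmap offsets ... */
    }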
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
 	struct i915_mmap_offset *mmo, *mn;
 
+	if (obj->ops->unmap_virtual)
+		obj->ops->unmap_virtual(obj);
+
 	spin_lock(&obj->mmo.lock);
 	rbtree_postorder_for_each_entry_safe(mmo, mn,
 					     &obj->mmo.offsets, offset) {
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops {
 	int (*pwrite)(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *arg);
 	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+	void (*unmap_virtual)(struct drm_i915_gem_object *obj);
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -950,6 +950,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 	return drm_vma_node_offset_addr(&obj->base.vma_node);
 }
 
+static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+{
+	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.name = "i915_gem_object_ttm",
 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
@@ -965,6 +970,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.migrate = i915_ttm_migrate,
 	.mmap_offset = i915_ttm_mmap_offset,
+	.unmap_virtual = i915_ttm_unmap_virtual,
 	.mmap_ops = &vm_ops_ttm,
 };
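
For reference, ttm_bo_unmap_virtual() is the existing TTM helper that
invalidates any userspace CPU mappings of the buffer object (by unmapping
its vma-node range), so the next access takes a fresh fault instead of
hitting stale PTEs; the new hook simply lets i915's generic release path
reach it.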
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1368,21 +1368,11 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 		}
 	}
 
-	if (!obj->ops->mmap_ops) {
-		err = check_absent(addr, obj->base.size);
-		if (err) {
-			pr_err("%s: was not absent\n", obj->mm.region->name);
-			goto out_unmap;
-		}
-	} else {
-		/* ttm allows access to evicted regions by design */
-		err = check_present(addr, obj->base.size);
-		if (err) {
-			pr_err("%s: was not present\n", obj->mm.region->name);
-			goto out_unmap;
-		}
-	}
+	err = check_absent(addr, obj->base.size);
+	if (err) {
+		pr_err("%s: was not absent\n", obj->mm.region->name);
+		goto out_unmap;
+	}
 
 out_unmap:
 	vm_munmap(addr, obj->base.size);
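
The selftest simplification (the "v2: fix up the selftest" above) follows
directly: now that revoking the mmap offset zaps userspace PTEs for
TTM-backed objects too, check_absent() is the correct expectation for every
backend, and the TTM-only check_present() branch, which tolerated access to
evicted ranges, can be dropped.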