Commit 8f18a987 authored by Daniel Vetter

Merge tag 'drm-intel-next-fixes-2022-01-13' of...

Merge tag 'drm-intel-next-fixes-2022-01-13' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Hold runtime PM wakelock during PXP unbind (Juston Li)
- Three fixes for the TTM backend fault handling (Matthew Auld)
- Make sure to unmap when purging in the TTM backend (Matthew Auld)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Yd/xzyCM87rfrwQT@tursulin-mobl2
parents cb6846fb 6ef295e3
...@@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) ...@@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{ {
struct i915_mmap_offset *mmo, *mn; struct i915_mmap_offset *mmo, *mn;
if (obj->ops->unmap_virtual)
obj->ops->unmap_virtual(obj);
spin_lock(&obj->mmo.lock); spin_lock(&obj->mmo.lock);
rbtree_postorder_for_each_entry_safe(mmo, mn, rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets, offset) { &obj->mmo.offsets, offset) {
......
...@@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops { ...@@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops {
int (*pwrite)(struct drm_i915_gem_object *obj, int (*pwrite)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg); const struct drm_i915_gem_pwrite *arg);
u64 (*mmap_offset)(struct drm_i915_gem_object *obj); u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
void (*unmap_virtual)(struct drm_i915_gem_object *obj);
int (*dmabuf_export)(struct drm_i915_gem_object *obj); int (*dmabuf_export)(struct drm_i915_gem_object *obj);
......
...@@ -161,7 +161,6 @@ int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj) ...@@ -161,7 +161,6 @@ int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
/* Immediately discard the backing storage */ /* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj) int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{ {
drm_gem_free_mmap_offset(&obj->base);
if (obj->ops->truncate) if (obj->ops->truncate)
return obj->ops->truncate(obj); return obj->ops->truncate(obj);
......
...@@ -556,6 +556,20 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, ...@@ -556,6 +556,20 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
} }
/*
 * Truncate (fully discard) the backing store of a TTM-backed GEM object.
 *
 * Callers are expected to have already marked the object as discardable;
 * hence the warning if it is still I915_MADV_WILLNEED. Outstanding users
 * are notified via i915_ttm_move_notify() before the pages are purged.
 * Returns 0 on success or a negative error code from the notify step.
 */
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	int ret;

	/* Truncating a WILLNEED object indicates a caller bug. */
	WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);

	ret = i915_ttm_move_notify(bo);
	if (ret != 0)
		return ret;

	return i915_ttm_purge(obj);
}
static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{ {
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
...@@ -883,6 +897,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) ...@@ -883,6 +897,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (ret) if (ret)
return ret; return ret;
if (obj->mm.madv != I915_MADV_WILLNEED) {
dma_resv_unlock(bo->base.resv);
return VM_FAULT_SIGBUS;
}
if (drm_dev_enter(dev, &idx)) { if (drm_dev_enter(dev, &idx)) {
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT); TTM_BO_VM_NUM_PREFAULT);
...@@ -945,6 +964,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) ...@@ -945,6 +964,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
return drm_vma_node_offset_addr(&obj->base.vma_node); return drm_vma_node_offset_addr(&obj->base.vma_node);
} }
/*
 * Zap all CPU PTEs pointing at this object's mappable range by delegating
 * to TTM's own unmap helper on the embedded ttm_buffer_object.
 */
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	ttm_bo_unmap_virtual(bo);
}
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm", .name = "i915_gem_object_ttm",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE | .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
...@@ -952,7 +976,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { ...@@ -952,7 +976,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.get_pages = i915_ttm_get_pages, .get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages, .put_pages = i915_ttm_put_pages,
.truncate = i915_ttm_purge, .truncate = i915_ttm_truncate,
.shrinker_release_pages = i915_ttm_shrinker_release_pages, .shrinker_release_pages = i915_ttm_shrinker_release_pages,
.adjust_lru = i915_ttm_adjust_lru, .adjust_lru = i915_ttm_adjust_lru,
...@@ -960,6 +984,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { ...@@ -960,6 +984,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.migrate = i915_ttm_migrate, .migrate = i915_ttm_migrate,
.mmap_offset = i915_ttm_mmap_offset, .mmap_offset = i915_ttm_mmap_offset,
.unmap_virtual = i915_ttm_unmap_virtual,
.mmap_ops = &vm_ops_ttm, .mmap_ops = &vm_ops_ttm,
}; };
......
...@@ -1368,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915, ...@@ -1368,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
} }
} }
if (!obj->ops->mmap_ops) { err = check_absent(addr, obj->base.size);
err = check_absent(addr, obj->base.size); if (err) {
if (err) { pr_err("%s: was not absent\n", obj->mm.region->name);
pr_err("%s: was not absent\n", obj->mm.region->name); goto out_unmap;
goto out_unmap;
}
} else {
/* ttm allows access to evicted regions by design */
err = check_present(addr, obj->base.size);
if (err) {
pr_err("%s: was not present\n", obj->mm.region->name);
goto out_unmap;
}
} }
out_unmap: out_unmap:
......
...@@ -107,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev, ...@@ -107,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
static void i915_pxp_tee_component_unbind(struct device *i915_kdev, static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
struct device *tee_kdev, void *data) struct device *tee_kdev, void *data)
{ {
struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev); struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
intel_wakeref_t wakeref;
intel_pxp_fini_hw(pxp); with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
intel_pxp_fini_hw(pxp);
mutex_lock(&pxp->tee_mutex); mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = NULL; pxp->pxp_component = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment