Commit f1ac8a02 authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: Fix pread/pwrite to work with new locking rules.

We are removing obj->mm.lock, and need to take the reservation lock
before we can pin pages. Move the page pinning into the helpers, and
merge the gtt pwrite/pread preparation and cleanup paths.

The fence lock is also removed; it would conflict with fence annotations,
because of memory allocations done when pagefaulting inside copy_*_user.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
[danvet: Pick the older version to avoid the conflicts]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210128162612.927917-31-maarten.lankhorst@linux.intel.com
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-31-maarten.lankhorst@linux.intel.com
parent c9398775
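Editorial note: the reworked pread/pwrite paths in the diff below follow the i915 ww/dma-resv retry pattern, where the object's reservation lock must be held before its backing pages are pinned. The following is a minimal sketch of that pattern under those assumptions; the wrapper name access_obj_locked() is hypothetical, while the i915_gem_* calls are the ones used in the patch.

/* Editorial sketch, not part of the patch: lock, pin, back off on -EDEADLK. */
static int access_obj_locked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);	/* true: interruptible waits */
retry:
	ret = i915_gem_object_lock(obj, &ww);	/* takes obj->base.resv */
	if (ret)
		goto err_ww;

	ret = i915_gem_object_pin_pages(obj);	/* must hold the reservation lock */
	if (ret)
		goto err_ww;

	/* ... use the pinned backing store here ... */

	i915_gem_object_unpin_pages(obj);

err_ww:
	if (ret == -EDEADLK) {
		/* lost the lock to another ww context: drop, wait, retry */
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);	/* drops any locks still held via @ww */
	return ret;
}

Note that the actual pread/pwrite paths in this patch drop the object lock again before the copy_to_user()/copy_from_user() loops run, so a page fault during the copy (which can allocate memory) never happens under the reservation lock or a published fence.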
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -139,7 +139,6 @@ gem-y += \
 	gem/i915_gem_dmabuf.o \
 	gem/i915_gem_domain.o \
 	gem/i915_gem_execbuffer.o \
-	gem/i915_gem_fence.o \
 	gem/i915_gem_internal.o \
 	gem/i915_gem_object.o \
 	gem/i915_gem_object_blt.o \
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_gem_object.h"
-
-struct stub_fence {
-	struct dma_fence dma;
-	struct i915_sw_fence chain;
-};
-
-static int __i915_sw_fence_call
-stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
-
-	switch (state) {
-	case FENCE_COMPLETE:
-		dma_fence_signal(&stub->dma);
-		break;
-
-	case FENCE_FREE:
-		dma_fence_put(&stub->dma);
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static const char *stub_driver_name(struct dma_fence *fence)
-{
-	return DRIVER_NAME;
-}
-
-static const char *stub_timeline_name(struct dma_fence *fence)
-{
-	return "object";
-}
-
-static void stub_release(struct dma_fence *fence)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
-	i915_sw_fence_fini(&stub->chain);
-
-	BUILD_BUG_ON(offsetof(typeof(*stub), dma));
-	dma_fence_free(&stub->dma);
-}
-
-static const struct dma_fence_ops stub_fence_ops = {
-	.get_driver_name = stub_driver_name,
-	.get_timeline_name = stub_timeline_name,
-	.release = stub_release,
-};
-
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
-{
-	struct stub_fence *stub;
-
-	assert_object_held(obj);
-
-	stub = kmalloc(sizeof(*stub), GFP_KERNEL);
-	if (!stub)
-		return NULL;
-
-	i915_sw_fence_init(&stub->chain, stub_notify);
-	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
-		       0, 0);
-
-	if (i915_sw_fence_await_reservation(&stub->chain,
-					    obj->base.resv, NULL, true,
-					    i915_fence_timeout(to_i915(obj->base.dev)),
-					    I915_FENCE_GFP) < 0)
-		goto err;
-
-	dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
-
-	return &stub->dma;
-
-err:
-	stub_release(&stub->dma);
-	return NULL;
-}
-
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
-				  struct dma_fence *fence)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
-	i915_sw_fence_commit(&stub->chain);
-}
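The file above, gem/i915_gem_fence.c, is removed outright. As a hedged illustration of why (an editorial sketch simplified from the old pread/pwrite code further down, not new code): the stub fence was published as the object's exclusive fence for the whole duration of the CPU copy, and the copy can fault and allocate memory while that fence is still unsignalled, which is what the fence annotations mentioned in the commit message flag.

	/* Old pattern being removed (editorial sketch): */
	fence = i915_gem_object_lock_fence(obj);	/* publish stub as exclusive fence */
	i915_gem_object_unlock(obj);
	if (!fence)
		return -ENOMEM;

	/*
	 * The copy_to_user()/copy_from_user() loop ran here; a page fault
	 * during the copy can allocate memory while the stub fence is
	 * still pending on obj->base.resv.
	 */

	i915_gem_object_unlock_fence(obj, fence);	/* commit chain; stub signals later */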
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -163,11 +163,6 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 	dma_resv_unlock(obj->base.resv);
 }
 
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
-				  struct dma_fence *fence);
-
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	unsigned int idx, offset;
-	struct dma_fence *fence;
 	char __user *user_data;
 	u64 remain;
 	int ret;
@@ -213,19 +212,17 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
-
-	fence = i915_gem_object_lock_fence(obj);
+	if (ret)
+		goto err_unpin;
+
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
 
-	if (!fence)
-		return -ENOMEM;
-
 	remain = args->size;
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = offset_in_page(args->offset);
@@ -243,7 +240,13 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 		offset = 0;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return ret;
 }
@@ -271,52 +274,102 @@ gtt_user_read(struct io_mapping *mapping,
 	return unwritten;
 }
 
-static int
-i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
-		   const struct drm_i915_gem_pread *args)
+static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
+					     struct drm_mm_node *node,
+					     bool write)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &i915->ggtt;
-	intel_wakeref_t wakeref;
-	struct drm_mm_node node;
-	struct dma_fence *fence;
-	void __user *user_data;
 	struct i915_vma *vma;
-	u64 remain, offset;
+	struct i915_gem_ww_ctx ww;
 	int ret;
 
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
 	vma = ERR_PTR(-ENODEV);
+	ret = i915_gem_object_lock(obj, &ww);
+	if (ret)
+		goto err_ww;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto err_ww;
+
 	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
+		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+						  PIN_MAPPABLE |
+						  PIN_NONBLOCK /* NOWARN */ |
+						  PIN_NOEVICT);
+	if (vma == ERR_PTR(-EDEADLK)) {
+		ret = -EDEADLK;
+		goto err_ww;
+	} else if (!IS_ERR(vma)) {
+		node->start = i915_ggtt_offset(vma);
+		node->flags = 0;
 	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
+		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
 		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
+			goto err_ww;
+		GEM_BUG_ON(!drm_mm_node_allocated(node));
+		vma = NULL;
 	}
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
+		if (drm_mm_node_allocated(node)) {
+			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+			remove_mappable_node(ggtt, node);
+		} else {
+			i915_vma_unpin(vma);
+		}
 	}
 
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+err_ww:
+	if (ret == -EDEADLK) {
+		ret = i915_gem_ww_ctx_backoff(&ww);
+		if (!ret)
+			goto retry;
+	}
+
+	i915_gem_ww_ctx_fini(&ww);
+	return ret ? ERR_PTR(ret) : vma;
+}
+
+static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
+				 struct drm_mm_node *node,
+				 struct i915_vma *vma)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
+	i915_gem_object_unpin_pages(obj);
+
+	if (drm_mm_node_allocated(node)) {
+		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+		remove_mappable_node(ggtt, node);
+	} else {
+		i915_vma_unpin(vma);
+	}
+}
+
+static int
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+		   const struct drm_i915_gem_pread *args)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	intel_wakeref_t wakeref;
+	struct drm_mm_node node;
+	void __user *user_data;
+	struct i915_vma *vma;
+	u64 remain, offset;
+	int ret = 0;
+
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+	vma = i915_gem_gtt_prepare(obj, &node, false);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -353,14 +406,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		offset += page_length;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	return ret;
@@ -425,15 +471,10 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto out;
-
 	ret = i915_gem_shmem_pread(obj, args);
 	if (ret == -EFAULT || ret == -ENODEV)
 		ret = i915_gem_gtt_pread(obj, args);
 
-	i915_gem_object_unpin_pages(obj);
 out:
 	i915_gem_object_put(obj);
 	return ret;
@@ -481,11 +522,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;
-	struct dma_fence *fence;
 	struct i915_vma *vma;
 	u64 remain, offset;
 	void __user *user_data;
-	int ret;
+	int ret = 0;
 
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
@@ -503,37 +543,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		wakeref = intel_runtime_pm_get(rpm);
 	}
 
-	vma = ERR_PTR(-ENODEV);
-	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
-	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
-		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
-	}
-
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
-	}
-
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+	vma = i915_gem_gtt_prepare(obj, &node, true);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
@@ -582,14 +595,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(rpm, wakeref);
 	return ret;
@@ -629,7 +635,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	unsigned int partial_cacheline_write;
 	unsigned int needs_clflush;
 	unsigned int offset, idx;
-	struct dma_fence *fence;
 	void __user *user_data;
 	u64 remain;
 	int ret;
@@ -638,19 +643,17 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
-
-	fence = i915_gem_object_lock_fence(obj);
+	if (ret)
+		goto err_unpin;
+
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
 
-	if (!fence)
-		return -ENOMEM;
-
 	/* If we don't overwrite a cacheline completely we need to be
 	 * careful to have up-to-date data by first clflushing. Don't
 	 * overcomplicate things and flush the entire patch.
@@ -678,8 +681,14 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	}
 
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-	i915_gem_object_unlock_fence(obj, fence);
 
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return ret;
 }
@@ -743,10 +752,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto err;
-
 	ret = -EFAULT;
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
@@ -767,7 +772,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_shmem_pwrite(obj, args);
 	}
 
-	i915_gem_object_unpin_pages(obj);
 err:
 	i915_gem_object_put(obj);
 	return ret;