Commit d04a836e authored by Dave Airlie

Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next

Mostly code reorganizations and optimizations for vmwgfx:
- Move TTM code that's only used by vmwgfx into vmwgfx.
- Break out the vmwgfx buffer and resource validation code into a separate source file.
- Get rid of a number of atomic operations during command buffer validation.

From: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180928131157.2810-1-thellstrom@vmware.com
parents 87c2ee74 e8c66efb
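
The central optimization in the series: handle lookups in the command-validation hot path no longer take and drop a reference per lookup. Instead, a "noref" lookup returns an unreferenced pointer that is only valid until a paired release call, and the new validation context takes the single reference that outlives the lookup. A schematic sketch of the before/after shape, using helpers that appear in the diff below (exact call sites in the driver differ; tfile, handle and sw_context are placeholders):

/* Schematic only -- not taken verbatim from the driver. */

/* Before: each buffer-object handle lookup pays a kref get and put. */
ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
if (ret)
	return ret;
ret = vmw_bo_to_validate_list(sw_context, vbo, false, NULL);
vmw_bo_unreference(&vbo);

/* After: an unreferenced pointer, valid only until the paired release;
 * the validation context holds the one reference that is kept. */
vbo = vmw_user_bo_noref_lookup(tfile, handle);
if (IS_ERR(vbo))
	return PTR_ERR(vbo);
ret = vmw_validation_add_bo(sw_context->ctx, vbo, false, false);
vmw_user_bo_noref_release();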
...@@ -4,8 +4,8 @@ ...@@ -4,8 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
ttm_bo_manager.o ttm_page_alloc_dma.o ttm_page_alloc_dma.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o obj-$(CONFIG_DRM_TTM) += ttm.o
...@@ -409,8 +409,7 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, ...@@ -409,8 +409,7 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages); node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
if (likely(node)) { if (likely(node)) {
bo = container_of(node, struct ttm_buffer_object, vma_node); bo = container_of(node, struct ttm_buffer_object, vma_node);
if (!kref_get_unless_zero(&bo->kref)) bo = ttm_bo_get_unless_zero(bo);
bo = NULL;
} }
drm_vma_offset_unlock_lookup(&bdev->vma_manager); drm_vma_offset_unlock_lookup(&bdev->vma_manager);
......
...@@ -7,6 +7,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ ...@@ -7,6 +7,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o \
ttm_object.o ttm_lock.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
...@@ -29,13 +29,13 @@ ...@@ -29,13 +29,13 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/ */
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_module.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
#include <linux/module.h> #include "ttm_lock.h"
#include "ttm_object.h"
#define TTM_WRITE_LOCK_PENDING (1 << 0) #define TTM_WRITE_LOCK_PENDING (1 << 0)
#define TTM_VT_LOCK_PENDING (1 << 1) #define TTM_VT_LOCK_PENDING (1 << 1)
...@@ -52,7 +52,6 @@ void ttm_lock_init(struct ttm_lock *lock) ...@@ -52,7 +52,6 @@ void ttm_lock_init(struct ttm_lock *lock)
lock->kill_takers = false; lock->kill_takers = false;
lock->signal = SIGKILL; lock->signal = SIGKILL;
} }
EXPORT_SYMBOL(ttm_lock_init);
void ttm_read_unlock(struct ttm_lock *lock) void ttm_read_unlock(struct ttm_lock *lock)
{ {
...@@ -61,7 +60,6 @@ void ttm_read_unlock(struct ttm_lock *lock) ...@@ -61,7 +60,6 @@ void ttm_read_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue); wake_up_all(&lock->queue);
spin_unlock(&lock->lock); spin_unlock(&lock->lock);
} }
EXPORT_SYMBOL(ttm_read_unlock);
static bool __ttm_read_lock(struct ttm_lock *lock) static bool __ttm_read_lock(struct ttm_lock *lock)
{ {
...@@ -92,7 +90,6 @@ int ttm_read_lock(struct ttm_lock *lock, bool interruptible) ...@@ -92,7 +90,6 @@ int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
wait_event(lock->queue, __ttm_read_lock(lock)); wait_event(lock->queue, __ttm_read_lock(lock));
return ret; return ret;
} }
EXPORT_SYMBOL(ttm_read_lock);
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{ {
...@@ -144,7 +141,6 @@ void ttm_write_unlock(struct ttm_lock *lock) ...@@ -144,7 +141,6 @@ void ttm_write_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue); wake_up_all(&lock->queue);
spin_unlock(&lock->lock); spin_unlock(&lock->lock);
} }
EXPORT_SYMBOL(ttm_write_unlock);
static bool __ttm_write_lock(struct ttm_lock *lock) static bool __ttm_write_lock(struct ttm_lock *lock)
{ {
...@@ -185,7 +181,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible) ...@@ -185,7 +181,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
return ret; return ret;
} }
EXPORT_SYMBOL(ttm_write_lock);
static int __ttm_vt_unlock(struct ttm_lock *lock) static int __ttm_vt_unlock(struct ttm_lock *lock)
{ {
...@@ -262,14 +257,12 @@ int ttm_vt_lock(struct ttm_lock *lock, ...@@ -262,14 +257,12 @@ int ttm_vt_lock(struct ttm_lock *lock,
return ret; return ret;
} }
EXPORT_SYMBOL(ttm_vt_lock);
int ttm_vt_unlock(struct ttm_lock *lock) int ttm_vt_unlock(struct ttm_lock *lock)
{ {
return ttm_ref_object_base_unref(lock->vt_holder, return ttm_ref_object_base_unref(lock->vt_holder,
lock->base.hash.key, TTM_REF_USAGE); lock->base.handle, TTM_REF_USAGE);
} }
EXPORT_SYMBOL(ttm_vt_unlock);
void ttm_suspend_unlock(struct ttm_lock *lock) void ttm_suspend_unlock(struct ttm_lock *lock)
{ {
...@@ -278,7 +271,6 @@ void ttm_suspend_unlock(struct ttm_lock *lock) ...@@ -278,7 +271,6 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue); wake_up_all(&lock->queue);
spin_unlock(&lock->lock); spin_unlock(&lock->lock);
} }
EXPORT_SYMBOL(ttm_suspend_unlock);
static bool __ttm_suspend_lock(struct ttm_lock *lock) static bool __ttm_suspend_lock(struct ttm_lock *lock)
{ {
...@@ -300,4 +292,3 @@ void ttm_suspend_lock(struct ttm_lock *lock) ...@@ -300,4 +292,3 @@ void ttm_suspend_lock(struct ttm_lock *lock)
{ {
wait_event(lock->queue, __ttm_suspend_lock(lock)); wait_event(lock->queue, __ttm_suspend_lock(lock));
} }
EXPORT_SYMBOL(ttm_suspend_lock);
...@@ -59,13 +59,12 @@ ...@@ -59,13 +59,12 @@
#define pr_fmt(fmt) "[TTM] " fmt #define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_module.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include "ttm_object.h"
struct ttm_object_file { struct ttm_object_file {
struct ttm_object_device *tdev; struct ttm_object_device *tdev;
...@@ -95,6 +94,7 @@ struct ttm_object_device { ...@@ -95,6 +94,7 @@ struct ttm_object_device {
struct dma_buf_ops ops; struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf); void (*dmabuf_release)(struct dma_buf *dma_buf);
size_t dma_buf_size; size_t dma_buf_size;
struct idr idr;
}; };
/** /**
...@@ -172,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile, ...@@ -172,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->ref_obj_release = ref_obj_release; base->ref_obj_release = ref_obj_release;
base->object_type = object_type; base->object_type = object_type;
kref_init(&base->refcount); kref_init(&base->refcount);
idr_preload(GFP_KERNEL);
spin_lock(&tdev->object_lock); spin_lock(&tdev->object_lock);
ret = drm_ht_just_insert_please_rcu(&tdev->object_hash, ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
&base->hash,
(unsigned long)base, 31, 0, 0);
spin_unlock(&tdev->object_lock); spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0)) idr_preload_end();
goto out_err0; if (ret < 0)
return ret;
base->handle = ret;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err1; goto out_err1;
...@@ -189,12 +190,10 @@ int ttm_base_object_init(struct ttm_object_file *tfile, ...@@ -189,12 +190,10 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0; return 0;
out_err1: out_err1:
spin_lock(&tdev->object_lock); spin_lock(&tdev->object_lock);
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock); spin_unlock(&tdev->object_lock);
out_err0:
return ret; return ret;
} }
EXPORT_SYMBOL(ttm_base_object_init);
static void ttm_release_base(struct kref *kref) static void ttm_release_base(struct kref *kref)
{ {
...@@ -203,7 +202,7 @@ static void ttm_release_base(struct kref *kref) ...@@ -203,7 +202,7 @@ static void ttm_release_base(struct kref *kref)
struct ttm_object_device *tdev = base->tfile->tdev; struct ttm_object_device *tdev = base->tfile->tdev;
spin_lock(&tdev->object_lock); spin_lock(&tdev->object_lock);
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock); spin_unlock(&tdev->object_lock);
/* /*
...@@ -225,7 +224,41 @@ void ttm_base_object_unref(struct ttm_base_object **p_base) ...@@ -225,7 +224,41 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
kref_put(&base->refcount, ttm_release_base); kref_put(&base->refcount, ttm_release_base);
} }
EXPORT_SYMBOL(ttm_base_object_unref);
/**
* ttm_base_object_noref_lookup - look up a base object without reference
* @tfile: The struct ttm_object_file the object is registered with.
* @key: The object handle.
*
* This function looks up a ttm base object and returns a pointer to it
* without refcounting the pointer. The returned pointer is only valid
* until ttm_base_object_noref_release() is called, and the object
* pointed to by the returned pointer may be doomed. Any persistent usage
* of the object requires a refcount to be taken using kref_get_unless_zero().
* Iff this function returns successfully it needs to be paired with
* ttm_base_object_noref_release(), and no sleeping or scheduling functions
* may be called in between these function calls.
*
* Return: A pointer to the object if successful or NULL otherwise.
*/
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
struct drm_hash_item *hash;
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
int ret;
rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, key, &hash);
if (ret) {
rcu_read_unlock();
return NULL;
}
__release(RCU);
return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key) uint32_t key)
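
A minimal usage sketch of the noref lookup/release pairing added above (hypothetical caller; tfile, handle and expected_type are placeholders). The returned pointer may only be used inside the non-sleeping window, and any longer-lived use must first take its own reference:

/* Illustrative sketch; not part of the diff. */
struct ttm_base_object *base;

base = ttm_base_object_noref_lookup(tfile, handle);
if (!base)
	return -ESRCH;

/* No sleeping or scheduling between lookup and release. */
if (ttm_base_object_type(base) != expected_type) {
	ttm_base_object_noref_release();
	return -EINVAL;
}

/* To keep the object past the release point, take a real reference. */
if (!kref_get_unless_zero(&base->refcount)) {
	ttm_base_object_noref_release();
	return -ESRCH;
}
ttm_base_object_noref_release();
/* ... use base, then drop it with ttm_base_object_unref(&base). */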
...@@ -247,29 +280,21 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, ...@@ -247,29 +280,21 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
return base; return base;
} }
EXPORT_SYMBOL(ttm_base_object_lookup);
struct ttm_base_object * struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{ {
struct ttm_base_object *base = NULL; struct ttm_base_object *base;
struct drm_hash_item *hash;
struct drm_open_hash *ht = &tdev->object_hash;
int ret;
rcu_read_lock(); rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, key, &hash); base = idr_find(&tdev->idr, key);
if (likely(ret == 0)) { if (base && !kref_get_unless_zero(&base->refcount))
base = drm_hash_entry(hash, struct ttm_base_object, hash);
if (!kref_get_unless_zero(&base->refcount))
base = NULL; base = NULL;
}
rcu_read_unlock(); rcu_read_unlock();
return base; return base;
} }
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
/** /**
* ttm_ref_object_exists - Check whether a caller has a valid ref object * ttm_ref_object_exists - Check whether a caller has a valid ref object
...@@ -289,7 +314,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile, ...@@ -289,7 +314,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_ref_object *ref; struct ttm_ref_object *ref;
rcu_read_lock(); rcu_read_lock();
if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0)) if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
goto out_false; goto out_false;
/* /*
...@@ -315,7 +340,6 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile, ...@@ -315,7 +340,6 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
rcu_read_unlock(); rcu_read_unlock();
return false; return false;
} }
EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile, int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base, struct ttm_base_object *base,
...@@ -340,7 +364,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, ...@@ -340,7 +364,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) { while (ret == -EINVAL) {
rcu_read_lock(); rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) { if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
...@@ -364,7 +388,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, ...@@ -364,7 +388,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return -ENOMEM; return -ENOMEM;
} }
ref->hash.key = base->hash.key; ref->hash.key = base->handle;
ref->obj = base; ref->obj = base;
ref->tfile = tfile; ref->tfile = tfile;
ref->ref_type = ref_type; ref->ref_type = ref_type;
...@@ -391,9 +415,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, ...@@ -391,9 +415,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return ret; return ret;
} }
EXPORT_SYMBOL(ttm_ref_object_add);
static void ttm_ref_object_release(struct kref *kref) static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{ {
struct ttm_ref_object *ref = struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref); container_of(kref, struct ttm_ref_object, kref);
...@@ -435,7 +459,6 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile, ...@@ -435,7 +459,6 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
spin_unlock(&tfile->lock); spin_unlock(&tfile->lock);
return 0; return 0;
} }
EXPORT_SYMBOL(ttm_ref_object_base_unref);
void ttm_object_file_release(struct ttm_object_file **p_tfile) void ttm_object_file_release(struct ttm_object_file **p_tfile)
{ {
...@@ -464,7 +487,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) ...@@ -464,7 +487,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
ttm_object_file_unref(&tfile); ttm_object_file_unref(&tfile);
} }
EXPORT_SYMBOL(ttm_object_file_release);
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order) unsigned int hash_order)
...@@ -499,7 +521,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, ...@@ -499,7 +521,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
return NULL; return NULL;
} }
EXPORT_SYMBOL(ttm_object_file_init);
struct ttm_object_device * struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob, ttm_object_device_init(struct ttm_mem_global *mem_glob,
...@@ -519,6 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, ...@@ -519,6 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (ret != 0) if (ret != 0)
goto out_no_object_hash; goto out_no_object_hash;
idr_init(&tdev->idr);
tdev->ops = *ops; tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release; tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release; tdev->ops.release = ttm_prime_dmabuf_release;
...@@ -530,7 +552,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, ...@@ -530,7 +552,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
kfree(tdev); kfree(tdev);
return NULL; return NULL;
} }
EXPORT_SYMBOL(ttm_object_device_init);
void ttm_object_device_release(struct ttm_object_device **p_tdev) void ttm_object_device_release(struct ttm_object_device **p_tdev)
{ {
...@@ -538,11 +559,12 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) ...@@ -538,11 +559,12 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL; *p_tdev = NULL;
WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
idr_destroy(&tdev->idr);
drm_ht_remove(&tdev->object_hash); drm_ht_remove(&tdev->object_hash);
kfree(tdev); kfree(tdev);
} }
EXPORT_SYMBOL(ttm_object_device_release);
/** /**
* get_dma_buf_unless_doomed - get a dma_buf reference if possible. * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
...@@ -641,14 +663,13 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, ...@@ -641,14 +663,13 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv; prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base; base = &prime->base;
*handle = base->hash.key; *handle = base->handle;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf); dma_buf_put(dma_buf);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
/** /**
* ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
...@@ -739,7 +760,6 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, ...@@ -739,7 +760,6 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
ttm_base_object_unref(&base); ttm_base_object_unref(&base);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
/** /**
* ttm_prime_object_init - Initialize a ttm_prime_object * ttm_prime_object_init - Initialize a ttm_prime_object
...@@ -772,4 +792,3 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, ...@@ -772,4 +792,3 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
ttm_prime_refcount_release, ttm_prime_refcount_release,
ref_obj_release); ref_obj_release);
} }
EXPORT_SYMBOL(ttm_prime_object_init);
...@@ -42,8 +42,7 @@ ...@@ -42,8 +42,7 @@
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <drm/ttm/ttm_memory.h>
#include "ttm_memory.h"
/** /**
* enum ttm_ref_type * enum ttm_ref_type
...@@ -125,14 +124,14 @@ struct ttm_object_device; ...@@ -125,14 +124,14 @@ struct ttm_object_device;
struct ttm_base_object { struct ttm_base_object {
struct rcu_head rhead; struct rcu_head rhead;
struct drm_hash_item hash;
enum ttm_object_type object_type;
bool shareable;
struct ttm_object_file *tfile; struct ttm_object_file *tfile;
struct kref refcount; struct kref refcount;
void (*refcount_release) (struct ttm_base_object **base); void (*refcount_release) (struct ttm_base_object **base);
void (*ref_obj_release) (struct ttm_base_object *base, void (*ref_obj_release) (struct ttm_base_object *base,
enum ttm_ref_type ref_type); enum ttm_ref_type ref_type);
u32 handle;
enum ttm_object_type object_type;
u32 shareable;
}; };
...@@ -351,4 +350,26 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, ...@@ -351,4 +350,26 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \ #define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead) kfree_rcu(__obj, __prime.base.rhead)
/*
* Extra memory required by the base object's idr storage, which is allocated
* separately from the base object itself. We estimate an average of 128 bytes
* per idr entry.
*/
#define TTM_OBJ_EXTRA_SIZE 128
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
/**
* ttm_base_object_noref_release - release a base object pointer looked up
* without reference
*
* Releases a base object pointer looked up with ttm_base_object_noref_lookup().
*/
static inline void ttm_base_object_noref_release(void)
{
__acquire(RCU);
rcu_read_unlock();
}
#endif #endif
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
#include "drm/ttm/ttm_object.h" #include "ttm_object.h"
/** /**
...@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size, ...@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
struct_size = backend_size + struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_buffer_object)); ttm_round_pot(sizeof(struct vmw_buffer_object));
user_struct_size = backend_size + user_struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_user_buffer_object)); ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
TTM_OBJ_EXTRA_SIZE;
} }
if (dev_priv->map_mode == vmw_dma_alloc_coherent) if (dev_priv->map_mode == vmw_dma_alloc_coherent)
...@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv, ...@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
*p_base = &user_bo->prime.base; *p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount); kref_get(&(*p_base)->refcount);
} }
*handle = user_bo->prime.base.hash.key; *handle = user_bo->prime.base.handle;
out_no_base_object: out_no_base_object:
return ret; return ret;
...@@ -920,6 +921,47 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile, ...@@ -920,6 +921,47 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
return 0; return 0;
} }
/**
* vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
* @tfile: The TTM object file the handle is registered with.
* @handle: The user buffer object handle.
*
* This function looks up a struct vmw_user_bo and returns a pointer to the
* struct vmw_buffer_object it derives from without refcounting the pointer.
* The returned pointer is only valid until vmw_user_bo_noref_release() is
* called, and the object pointed to by the returned pointer may be doomed.
* Any persistent usage of the object requires a refcount to be taken using
* ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
* needs to be paired with vmw_user_bo_noref_release(), and no sleeping
* or scheduling functions may be called in between these function calls.
*
* Return: A struct vmw_buffer_object pointer if successful or negative
* error pointer on failure.
*/
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
struct vmw_user_buffer_object *vmw_user_bo;
struct ttm_base_object *base;
base = ttm_base_object_noref_lookup(tfile, handle);
if (!base) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return ERR_PTR(-ESRCH);
}
if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_noref_release();
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return ERR_PTR(-EINVAL);
}
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
return &vmw_user_bo->vbo;
}
/** /**
* vmw_user_bo_reference - Open a handle to a vmw user buffer object. * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
...@@ -940,7 +982,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile, ...@@ -940,7 +982,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo); user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
*handle = user_bo->prime.base.hash.key; *handle = user_bo->prime.base.handle;
return ttm_ref_object_add(tfile, &user_bo->prime.base, return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL, false); TTM_REF_USAGE, NULL, false);
} }
......
...@@ -660,7 +660,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) ...@@ -660,7 +660,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{ {
struct vmw_cmdbuf_header *cur = man->cur; struct vmw_cmdbuf_header *cur = man->cur;
WARN_ON(!mutex_is_locked(&man->cur_mutex)); lockdep_assert_held_once(&man->cur_mutex);
if (!cur) if (!cur)
return; return;
...@@ -1045,7 +1045,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man, ...@@ -1045,7 +1045,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
{ {
struct vmw_cmdbuf_header *cur = man->cur; struct vmw_cmdbuf_header *cur = man->cur;
WARN_ON(!mutex_is_locked(&man->cur_mutex)); lockdep_assert_held_once(&man->cur_mutex);
WARN_ON(size > cur->reserved); WARN_ON(size > cur->reserved);
man->cur_pos += size; man->cur_pos += size;
......
...@@ -89,8 +89,7 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, ...@@ -89,8 +89,7 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ERR_PTR(ret); return ERR_PTR(ret);
return vmw_resource_reference return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
(drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
} }
/** /**
......
...@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, ...@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
} }
} }
res->hw_destroy = vmw_hw_context_destroy;
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0; return 0;
out_cotables: out_cotables:
...@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, ...@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd)); vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv); vmw_fifo_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy); res->hw_destroy = vmw_hw_context_destroy;
return 0; return 0;
out_early: out_early:
...@@ -757,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data, ...@@ -757,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number_of contexts anyway.
*/
if (unlikely(vmw_user_context_size == 0)) if (unlikely(vmw_user_context_size == 0))
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 + vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0); ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
...@@ -809,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data, ...@@ -809,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
goto out_err; goto out_err;
} }
arg->cid = ctx->base.hash.key; arg->cid = ctx->base.handle;
out_err: out_err:
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
out_unlock: out_unlock:
...@@ -867,9 +861,8 @@ struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, ...@@ -867,9 +861,8 @@ struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
if (cotable_type >= SVGA_COTABLE_DX10_MAX) if (cotable_type >= SVGA_COTABLE_DX10_MAX)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
return vmw_resource_reference return container_of(ctx, struct vmw_user_context, res)->
(container_of(ctx, struct vmw_user_context, res)-> cotables[cotable_type];
cotables[cotable_type]);
} }
/** /**
......
...@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, ...@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
vcotbl->type = type; vcotbl->type = type;
vcotbl->ctx = ctx; vcotbl->ctx = ctx;
vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy); vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
return &vcotbl->res; return &vcotbl->res;
......
...@@ -30,9 +30,9 @@ ...@@ -30,9 +30,9 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
#include "vmwgfx_binding.h" #include "vmwgfx_binding.h"
#include "ttm_object.h"
#include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h> #include <linux/dma_remapping.h>
...@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->binding_mutex); mutex_init(&dev_priv->binding_mutex);
mutex_init(&dev_priv->requested_layout_mutex); mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex); mutex_init(&dev_priv->global_kms_state_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem); ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock); spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock); spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock); spin_lock_init(&dev_priv->cap_lock);
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#ifndef _VMWGFX_DRV_H_ #ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_ #define _VMWGFX_DRV_H_
#include "vmwgfx_validation.h"
#include "vmwgfx_reg.h" #include "vmwgfx_reg.h"
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/vmwgfx_drm.h> #include <drm/vmwgfx_drm.h>
...@@ -35,11 +36,11 @@ ...@@ -35,11 +36,11 @@
#include <drm/drm_auth.h> #include <drm/drm_auth.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h> #include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h" #include "vmwgfx_fence.h"
#include "ttm_object.h"
#include "ttm_lock.h"
#include <linux/sync_file.h> #include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx" #define VMWGFX_DRIVER_NAME "vmwgfx"
...@@ -112,21 +113,49 @@ struct vmw_validate_buffer { ...@@ -112,21 +113,49 @@ struct vmw_validate_buffer {
}; };
struct vmw_res_func; struct vmw_res_func;
/**
* struct vmw-resource - base class for hardware resources
*
* @kref: For refcounting.
* @dev_priv: Pointer to the device private for this resource. Immutable.
* @id: Device id. Protected by @dev_priv::resource_lock.
* @backup_size: Backup buffer size. Immutable.
* @res_dirty: Resource contains data not yet in the backup buffer. Protected
* by resource reserved.
* @backup_dirty: Backup buffer contains data not yet in the HW resource.
* Protected by resource reserved.
* @backup: The backup buffer if any. Protected by resource reserved.
* @backup_offset: Offset into the backup buffer if any. Protected by resource
* reserved. Note that only a few resource types can have a @backup_offset
* different from zero.
* @pin_count: The pin count for this resource. A pinned resource has a
* pin-count greater than zero. It is not on the resource LRU lists and its
* backup buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
* @mob_head: List head for the MOB backup list. Protected by @backup reserved.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
* @res_free: The resource destructor.
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
struct vmw_resource { struct vmw_resource {
struct kref kref; struct kref kref;
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
int id; int id;
bool avail;
unsigned long backup_size; unsigned long backup_size;
bool res_dirty; /* Protected by backup buffer reserved */ bool res_dirty;
bool backup_dirty; /* Protected by backup buffer reserved */ bool backup_dirty;
struct vmw_buffer_object *backup; struct vmw_buffer_object *backup;
unsigned long backup_offset; unsigned long backup_offset;
unsigned long pin_count; /* Protected by resource reserved */ unsigned long pin_count;
const struct vmw_res_func *func; const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */ struct list_head lru_head;
struct list_head mob_head; /* Protected by @backup reserved */ struct list_head mob_head;
struct list_head binding_head; /* Protected by binding_mutex */ struct list_head binding_head;
void (*res_free) (struct vmw_resource *res); void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res); void (*hw_destroy) (struct vmw_resource *res);
}; };
...@@ -204,29 +233,24 @@ struct vmw_fifo_state { ...@@ -204,29 +233,24 @@ struct vmw_fifo_state {
bool dx; bool dx;
}; };
struct vmw_relocation {
SVGAMobId *mob_loc;
SVGAGuestPtr *location;
uint32_t index;
};
/** /**
* struct vmw_res_cache_entry - resource information cache entry * struct vmw_res_cache_entry - resource information cache entry
* * @handle: User-space handle of a resource.
* @res: Non-ref-counted pointer to the resource.
* @valid_handle: Whether the @handle member is valid.
* @valid: Whether the entry is valid, which also implies that the execbuf * @valid: Whether the entry is valid, which also implies that the execbuf
* code holds a reference to the resource, and it's placed on the * code holds a reference to the resource, and it's placed on the
* validation list. * validation list.
* @handle: User-space handle of a resource.
* @res: Non-ref-counted pointer to the resource.
* *
* Used to avoid frequent repeated user-space handle lookups of the * Used to avoid frequent repeated user-space handle lookups of the
* same resource. * same resource.
*/ */
struct vmw_res_cache_entry { struct vmw_res_cache_entry {
bool valid;
uint32_t handle; uint32_t handle;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_resource_val_node *node; void *private;
unsigned short valid_handle;
unsigned short valid;
}; };
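
The cache entry is consulted for every handle reference in a command stream; a condensed sketch of the fast path it enables, using the fields above (sw_context, handle, res_type and p_res are placeholders):

/* Illustrative fast path; the real lookup code carries more state. */
struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];

if (rcache->valid_handle && handle == rcache->handle) {
	*p_res = rcache->res;	/* no user-space handle lookup needed */
	return 0;
}
/* Slow path: translate the handle and refresh the cache entry. */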
/** /**
...@@ -291,35 +315,63 @@ enum vmw_display_unit_type { ...@@ -291,35 +315,63 @@ enum vmw_display_unit_type {
vmw_du_screen_target vmw_du_screen_target
}; };
struct vmw_validation_context;
struct vmw_ctx_validation_info;
/**
* struct vmw_sw_context - Command submission context
* @res_ht: Pointer hash table used to find validation duplicates
* @kernel: Whether the command buffer originates from kernel code rather
* than from user-space
* @fp: If @kernel is false, points to the file of the client. Otherwise
* NULL
* @cmd_bounce: Command bounce buffer used for command validation before
* copying to fifo space
* @cmd_bounce_size: Current command bounce buffer size
* @cur_query_bo: Current buffer object used as query result buffer
* @bo_relocations: List of buffer object relocations
* @res_relocations: List of resource relocations
* @buf_start: Pointer to start of memory where command validation takes
* place
* @res_cache: Cache of recently looked up resources
* @last_query_ctx: Last context that submitted a query
* @needs_post_query_barrier: Whether a query barrier is needed after
* command submission
* @staged_bindings: Cached per-context binding tracker
* @staged_bindings_inuse: Whether the cached per-context binding tracker
* is in use
* @staged_cmd_res: List of staged command buffer managed resources in this
* command buffer
* @ctx_list: List of context resources referenced in this command buffer
* @dx_ctx_node: Validation metadata of the current DX context
* @dx_query_mob: The MOB used for DX queries
* @dx_query_ctx: The DX context used for the last DX query
* @man: Pointer to the command buffer managed resource manager
* @ctx: The validation context
*/
struct vmw_sw_context{ struct vmw_sw_context{
struct drm_open_hash res_ht; struct drm_open_hash res_ht;
bool res_ht_initialized; bool res_ht_initialized;
bool kernel; /**< is the called made from the kernel */ bool kernel;
struct vmw_fpriv *fp; struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
uint32_t *cmd_bounce; uint32_t *cmd_bounce;
uint32_t cmd_bounce_size; uint32_t cmd_bounce_size;
struct list_head resource_list;
struct list_head ctx_resource_list; /* For contexts and cotables */
struct vmw_buffer_object *cur_query_bo; struct vmw_buffer_object *cur_query_bo;
struct list_head bo_relocations;
struct list_head res_relocations; struct list_head res_relocations;
uint32_t *buf_start; uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max]; struct vmw_res_cache_entry res_cache[vmw_res_max];
struct vmw_resource *last_query_ctx; struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier; bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state *staged_bindings; struct vmw_ctx_binding_state *staged_bindings;
bool staged_bindings_inuse; bool staged_bindings_inuse;
struct list_head staged_cmd_res; struct list_head staged_cmd_res;
struct vmw_resource_val_node *dx_ctx_node; struct list_head ctx_list;
struct vmw_ctx_validation_info *dx_ctx_node;
struct vmw_buffer_object *dx_query_mob; struct vmw_buffer_object *dx_query_mob;
struct vmw_resource *dx_query_ctx; struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man; struct vmw_cmdbuf_res_manager *man;
struct vmw_validation_context *ctx;
}; };
struct vmw_legacy_display; struct vmw_legacy_display;
...@@ -444,7 +496,7 @@ struct vmw_private { ...@@ -444,7 +496,7 @@ struct vmw_private {
* Context and surface management. * Context and surface management.
*/ */
rwlock_t resource_lock; spinlock_t resource_lock;
struct idr res_idr[vmw_res_max]; struct idr res_idr[vmw_res_max];
/* /*
* Block lastclose from racing with firstopen. * Block lastclose from racing with firstopen.
...@@ -628,7 +680,7 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res); ...@@ -628,7 +680,7 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource * extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res); vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res); extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup); bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res); extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
...@@ -643,6 +695,12 @@ extern int vmw_user_resource_lookup_handle( ...@@ -643,6 +695,12 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle, uint32_t handle,
const struct vmw_user_resource_conv *converter, const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res); struct vmw_resource **p_res);
extern struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv *
converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
...@@ -661,6 +719,15 @@ extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); ...@@ -661,6 +719,15 @@ extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv); extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
/**
* vmw_user_resource_noref_release - release a user resource pointer looked up
* without reference
*/
static inline void vmw_user_resource_noref_release(void)
{
ttm_base_object_noref_release();
}
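
Resources follow the same noref discipline: a user-space handle is translated to an unreferenced resource pointer, which the execbuf code then either matches against its cache or hands to the validation list; either way the noref pointer is released before anything can sleep. A condensed sketch (error handling trimmed; handle and converter are placeholders):

/* Illustrative sketch mirroring the execbuf resource lookup path. */
struct vmw_resource *res;

res = vmw_user_resource_noref_lookup_handle(dev_priv, sw_context->fp->tfile,
					    handle, converter);
if (IS_ERR(res))
	return PTR_ERR(res);

/* Consumes the noref pointer: releases it after a cache hit, or after
 * the validation list has taken the reference that is kept. */
ret = vmw_execbuf_res_noref_val_add(sw_context, res);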
/** /**
* Buffer object helper functions - vmwgfx_bo.c * Buffer object helper functions - vmwgfx_bo.c
*/ */
...@@ -717,6 +784,18 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo); ...@@ -717,6 +784,18 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
/**
* vmw_user_bo_noref_release - release a buffer object pointer looked up
* without reference
*/
static inline void vmw_user_bo_noref_release(void)
{
ttm_base_object_noref_release();
}
/** /**
* Misc Ioctl functionality - vmwgfx_ioctl.c * Misc Ioctl functionality - vmwgfx_ioctl.c
...@@ -864,10 +943,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, ...@@ -864,10 +943,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
uint32_t fence_handle, uint32_t fence_handle,
int32_t out_fence_fd, int32_t out_fence_fd,
struct sync_file *sync_file); struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/** /**
......
...@@ -35,6 +35,23 @@ ...@@ -35,6 +35,23 @@
#define VMW_RES_HT_ORDER 12 #define VMW_RES_HT_ORDER 12
/*
* struct vmw_relocation - Buffer object relocation
*
* @head: List head for the command submission context's relocation list
* @vbo: Non ref-counted pointer to buffer object
* @mob_loc: Pointer to location for mob id to be modified
* @location: Pointer to location for guest pointer to be modified
*/
struct vmw_relocation {
struct list_head head;
struct vmw_buffer_object *vbo;
union {
SVGAMobId *mob_loc;
SVGAGuestPtr *location;
};
};
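
A brief sketch of how such a relocation is used (allocation of the node is left out; vmw_bo and id are placeholders): the command-stream location is remembered together with the buffer object, and the final device address is patched in after validation has placed the buffer.

/* Illustrative sketch; not part of the diff. */
reloc->vbo = vmw_bo;		/* buffer the device address refers to */
reloc->mob_loc = id;		/* command-stream location to patch */
list_add_tail(&reloc->head, &sw_context->bo_relocations);
/* At fixup time the final MOB id or guest pointer of reloc->vbo is
 * written back through reloc->mob_loc or reloc->location. */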
/** /**
* enum vmw_resource_relocation_type - Relocation type for resources * enum vmw_resource_relocation_type - Relocation type for resources
* *
...@@ -69,35 +86,18 @@ struct vmw_resource_relocation { ...@@ -69,35 +86,18 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3; enum vmw_resource_relocation_type rel_type:3;
}; };
/** /*
* struct vmw_resource_val_node - Validation info for resources * struct vmw_ctx_validation_info - Extra validation metadata for contexts
* * @head: List head of context list
* @head: List head for the software context's resource list. * @ctx: The context resource
* @hash: Hash entry for quick resouce to val_node lookup. * @cur: The context's persistent binding state
* @res: Ref-counted pointer to the resource. * @staged: The binding state changes of this command buffer
* @switch_backup: Boolean whether to switch backup buffer on unreserve.
* @new_backup: Refcounted pointer to the new backup buffer.
* @staged_bindings: If @res is a context, tracks bindings set up during
* the command batch. Otherwise NULL.
* @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
* @switching_backup: The command stream provides a new backup buffer for a
* resource.
* @no_buffer_needed: This means @switching_backup is true on first buffer
* reference. So resource reservation does not need to allocate a backup
* buffer for the resource.
*/ */
struct vmw_resource_val_node { struct vmw_ctx_validation_info {
struct list_head head; struct list_head head;
struct drm_hash_item hash; struct vmw_resource *ctx;
struct vmw_resource *res; struct vmw_ctx_binding_state *cur;
struct vmw_buffer_object *new_backup; struct vmw_ctx_binding_state *staged;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
u32 first_usage : 1;
u32 switching_backup : 1;
u32 no_buffer_needed : 1;
}; };
/** /**
...@@ -127,10 +127,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -127,10 +127,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGAMobId *id, SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p); struct vmw_buffer_object **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
/** /**
* vmw_ptr_diff - Compute the offset from a to b in bytes * vmw_ptr_diff - Compute the offset from a to b in bytes
* *
...@@ -145,48 +141,38 @@ static size_t vmw_ptr_diff(void *a, void *b) ...@@ -145,48 +141,38 @@ static size_t vmw_ptr_diff(void *a, void *b)
} }
/** /**
* vmw_resources_unreserve - unreserve resources previously reserved for * vmw_execbuf_bindings_commit - Commit modified binding state
* command submission. * @sw_context: The command submission context
* * @backoff: Whether this is part of the error path and binding state
* @sw_context: pointer to the software context * changes should be ignored
* @backoff: Whether command submission failed.
*/ */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context, static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
bool backoff) bool backoff)
{ {
struct vmw_resource_val_node *val; struct vmw_ctx_validation_info *entry;
struct list_head *list = &sw_context->resource_list;
if (sw_context->dx_query_mob && !backoff) list_for_each_entry(entry, &sw_context->ctx_list, head) {
vmw_context_bind_dx_query(sw_context->dx_query_ctx, if (!backoff)
sw_context->dx_query_mob); vmw_binding_state_commit(entry->cur, entry->staged);
if (entry->staged != sw_context->staged_bindings)
list_for_each_entry(val, list, head) { vmw_binding_state_free(entry->staged);
struct vmw_resource *res = val->res;
bool switch_backup =
(backoff) ? false : val->switching_backup;
/*
* Transfer staged context bindings to the
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
vmw_binding_state_commit
(vmw_context_binding_state(val->res),
val->staged_bindings);
}
if (val->staged_bindings != sw_context->staged_bindings)
vmw_binding_state_free(val->staged_bindings);
else else
sw_context->staged_bindings_inuse = false; sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
vmw_resource_unreserve(res, switch_backup, val->new_backup,
val->new_backup_offset);
vmw_bo_unreference(&val->new_backup);
} }
/* List entries are freed with the validation context */
INIT_LIST_HEAD(&sw_context->ctx_list);
}
/**
* vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
* @sw_context: The command submission context
*/
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
if (sw_context->dx_query_mob)
vmw_context_bind_dx_query(sw_context->dx_query_ctx,
sw_context->dx_query_mob);
} }
/** /**
...@@ -194,16 +180,17 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context, ...@@ -194,16 +180,17 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
* added to the validate list. * added to the validate list.
* *
* @dev_priv: Pointer to the device private: * @dev_priv: Pointer to the device private:
* @sw_context: The validation context: * @sw_context: The command submission context
* @node: The validation node holding this context. * @node: The validation node holding the context resource metadata
*/ */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
struct vmw_resource_val_node *node) struct vmw_resource *res,
struct vmw_ctx_validation_info *node)
{ {
int ret; int ret;
ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res); ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err;
...@@ -220,91 +207,138 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, ...@@ -220,91 +207,138 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
} }
if (sw_context->staged_bindings_inuse) { if (sw_context->staged_bindings_inuse) {
node->staged_bindings = vmw_binding_state_alloc(dev_priv); node->staged = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(node->staged_bindings)) { if (IS_ERR(node->staged)) {
DRM_ERROR("Failed to allocate context binding " DRM_ERROR("Failed to allocate context binding "
"information.\n"); "information.\n");
ret = PTR_ERR(node->staged_bindings); ret = PTR_ERR(node->staged);
node->staged_bindings = NULL; node->staged = NULL;
goto out_err; goto out_err;
} }
} else { } else {
node->staged_bindings = sw_context->staged_bindings; node->staged = sw_context->staged_bindings;
sw_context->staged_bindings_inuse = true; sw_context->staged_bindings_inuse = true;
} }
node->ctx = res;
node->cur = vmw_context_binding_state(res);
list_add_tail(&node->head, &sw_context->ctx_list);
return 0; return 0;
out_err: out_err:
return ret; return ret;
} }
/** /**
* vmw_resource_val_add - Add a resource to the software context's * vmw_execbuf_res_size - calculate extra size for the resource validation
* resource list if it's not already on it. * node
* @dev_priv: Pointer to the device private struct.
* @res_type: The resource type.
* *
* @sw_context: Pointer to the software context. * Guest-backed contexts and DX contexts require extra size to store
* execbuf private information in the validation node. Typically the
* binding manager associated data structures.
*
* Returns: The extra size requirement based on resource type.
*/
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
enum vmw_res_type res_type)
{
return (res_type == vmw_res_dx_context ||
(res_type == vmw_res_context && dev_priv->has_mob)) ?
sizeof(struct vmw_ctx_validation_info) : 0;
}
/**
* vmw_execbuf_rcache_update - Update a resource-node cache entry
*
* @rcache: Pointer to the entry to update.
* @res: Pointer to the resource. * @res: Pointer to the resource.
* @p_node On successful return points to a valid pointer to a * @private: Pointer to the execbuf-private space in the resource
* struct vmw_resource_val_node, if non-NULL on entry. * validation node.
*/ */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context, static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
struct vmw_resource *res, struct vmw_resource *res,
struct vmw_resource_val_node **p_node) void *private)
{
rcache->res = res;
rcache->private = private;
rcache->valid = 1;
rcache->valid_handle = 0;
}
/**
* vmw_execbuf_res_noref_val_add - Add a resource described by an
* unreferenced rcu-protected pointer to the validation list.
* @sw_context: Pointer to the software context.
* @res: Unreferenced rcu-protected pointer to the resource.
*
* Returns: 0 on success. Negative error code on failure. Typical error
* codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
* doomed.
*/
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res)
{ {
struct vmw_private *dev_priv = res->dev_priv; struct vmw_private *dev_priv = res->dev_priv;
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret; int ret;
enum vmw_res_type res_type = vmw_res_type(res);
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res, struct vmw_res_cache_entry *rcache;
&hash) == 0)) { struct vmw_ctx_validation_info *ctx_info;
node = container_of(hash, struct vmw_resource_val_node, hash); bool first_usage;
node->first_usage = false; unsigned int priv_size;
if (unlikely(p_node != NULL))
*p_node = node; rcache = &sw_context->res_cache[res_type];
if (likely(rcache->valid && rcache->res == res)) {
vmw_user_resource_noref_release();
return 0; return 0;
} }
node = kzalloc(sizeof(*node), GFP_KERNEL); priv_size = vmw_execbuf_res_size(dev_priv, res_type);
if (unlikely(!node)) { ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
DRM_ERROR("Failed to allocate a resource validation " (void **)&ctx_info, &first_usage);
"entry.\n"); vmw_user_resource_noref_release();
return -ENOMEM; if (ret)
} return ret;
node->hash.key = (unsigned long) res; if (priv_size && first_usage) {
ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash); ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
if (unlikely(ret != 0)) { ctx_info);
DRM_ERROR("Failed to initialize a resource validation " if (ret)
"entry.\n");
kfree(node);
return ret; return ret;
} }
node->res = vmw_resource_reference(res);
node->first_usage = true;
if (unlikely(p_node != NULL))
*p_node = node;
if (!dev_priv->has_mob) { vmw_execbuf_rcache_update(rcache, res, ctx_info);
list_add_tail(&node->head, &sw_context->resource_list);
return 0; return 0;
} }
switch (vmw_res_type(res)) { /**
case vmw_res_context: * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
case vmw_res_dx_context: * validation list if it's not already on it
list_add(&node->head, &sw_context->ctx_resource_list); * @sw_context: Pointer to the software context.
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node); * @res: Pointer to the resource.
break; *
case vmw_res_cotable: * Returns: Zero on success. Negative error code on failure.
list_add_tail(&node->head, &sw_context->ctx_resource_list); */
break; static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
default: struct vmw_resource *res)
list_add_tail(&node->head, &sw_context->resource_list); {
break; struct vmw_res_cache_entry *rcache;
} enum vmw_res_type res_type = vmw_res_type(res);
void *ptr;
int ret;
rcache = &sw_context->res_cache[res_type];
if (likely(rcache->valid && rcache->res == res))
return 0;
ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
if (ret)
return ret; return ret;
vmw_execbuf_rcache_update(rcache, res, ptr);
return 0;
} }
/** /**
...@@ -325,11 +359,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, ...@@ -325,11 +359,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
* First add the resource the view is pointing to, otherwise * First add the resource the view is pointing to, otherwise
* it may be swapped out when the view is validated. * it may be swapped out when the view is validated.
*/ */
ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL); ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
if (ret) if (ret)
return ret; return ret;
return vmw_resource_val_add(sw_context, view, NULL); return vmw_execbuf_res_noctx_val_add(sw_context, view);
} }
/** /**
...@@ -342,28 +376,33 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, ...@@ -342,28 +376,33 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
* *
* The view is represented by a view id and the DX context it's created on, * The view is represented by a view id and the DX context it's created on,
* or scheduled for creation on. If there is no DX context set, the function * or scheduled for creation on. If there is no DX context set, the function
* will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure. * will return an -EINVAL error pointer.
*
* Returns: Unreferenced pointer to the resource on success, negative error
* pointer on failure.
*/ */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context, static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
enum vmw_view_type view_type, u32 id) enum vmw_view_type view_type, u32 id)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *view; struct vmw_resource *view;
int ret; int ret;
if (!ctx_node) { if (!ctx_node) {
DRM_ERROR("DX Context not set.\n"); DRM_ERROR("DX Context not set.\n");
return -EINVAL; return ERR_PTR(-EINVAL);
} }
view = vmw_view_lookup(sw_context->man, view_type, id); view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view)) if (IS_ERR(view))
return PTR_ERR(view); return view;
ret = vmw_view_res_val_add(sw_context, view); ret = vmw_view_res_val_add(sw_context, view);
vmw_resource_unreference(&view); if (ret)
return ERR_PTR(ret);
return ret; return view;
} }
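A sketch of the new calling convention (not part of the patch): the helper now hands back the view itself, so callers that need the resource use it directly, while callers that only need a status collapse the ERR_PTR as the clear-view checkers further down do with PTR_RET().

	static int example_use_view(struct vmw_sw_context *sw_context, u32 view_id)
	{
		struct vmw_resource *view;

		view = vmw_view_id_val_add(sw_context, vmw_view_rt, view_id);
		if (IS_ERR(view))
			return PTR_ERR(view);

		/* The pointer is unreferenced but stays usable for the rest of
		 * the validation, e.g. as binding.bi.res in a
		 * vmw_ctx_bindinfo_view, with no matching unreference. */
		return 0;
	}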
/** /**
@@ -394,8 +433,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res)) if (IS_ERR(res))
continue; continue;
ret = vmw_resource_val_add(sw_context, res, NULL); ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
vmw_resource_unreference(&res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
} }
@@ -407,17 +445,11 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
binding_list = vmw_context_binding_list(ctx); binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) { list_for_each_entry(entry, binding_list, ctx_list) {
/* entry->res is not refcounted */
res = vmw_resource_reference_unless_doomed(entry->res);
if (unlikely(res == NULL))
continue;
if (vmw_res_type(entry->res) == vmw_res_view) if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res); ret = vmw_view_res_val_add(sw_context, entry->res);
else else
ret = vmw_resource_val_add(sw_context, entry->res, ret = vmw_execbuf_res_noctx_val_add(sw_context,
NULL); entry->res);
vmw_resource_unreference(&res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
break; break;
} }
@@ -427,9 +459,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
dx_query_mob = vmw_context_get_dx_query_mob(ctx); dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob) if (dx_query_mob)
ret = vmw_bo_to_validate_list(sw_context, ret = vmw_validation_add_bo(sw_context->ctx,
dx_query_mob, dx_query_mob, true, false);
true, NULL);
} }
mutex_unlock(&dev_priv->binding_mutex); mutex_unlock(&dev_priv->binding_mutex);
@@ -445,7 +476,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* id that needs fixup is located. Granularity is one byte. * id that needs fixup is located. Granularity is one byte.
* @rel_type: Relocation type. * @rel_type: Relocation type.
*/ */
static int vmw_resource_relocation_add(struct list_head *list, static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
const struct vmw_resource *res, const struct vmw_resource *res,
unsigned long offset, unsigned long offset,
enum vmw_resource_relocation_type enum vmw_resource_relocation_type
@@ -453,7 +484,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
{ {
struct vmw_resource_relocation *rel; struct vmw_resource_relocation *rel;
rel = kmalloc(sizeof(*rel), GFP_KERNEL); rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
if (unlikely(!rel)) { if (unlikely(!rel)) {
DRM_ERROR("Failed to allocate a resource relocation.\n"); DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM; return -ENOMEM;
@@ -462,7 +493,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res; rel->res = res;
rel->offset = offset; rel->offset = offset;
rel->rel_type = rel_type; rel->rel_type = rel_type;
list_add_tail(&rel->head, list); list_add_tail(&rel->head, &sw_context->res_relocations);
return 0; return 0;
} }
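A sketch of the resulting call pattern (not in the patch): relocation entries are now carved out of the validation context's memory pool, so a command checker only records the offset of the id it wants patched and never frees the entry itself.

	static int example_record_reloc(struct vmw_sw_context *sw_context,
					struct vmw_resource *res, u32 *id_loc)
	{
		/* Offset of the id within the submitted command buffer, in bytes. */
		return vmw_resource_relocation_add(sw_context, res,
						   vmw_ptr_diff(sw_context->buf_start,
								id_loc),
						   vmw_res_rel_normal);
	}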
@@ -470,16 +501,13 @@ static int vmw_resource_relocation_add(struct list_head *list,
/** /**
* vmw_resource_relocations_free - Free all relocations on a list * vmw_resource_relocations_free - Free all relocations on a list
* *
* @list: Pointer to the head of the relocation list. * @list: Pointer to the head of the relocation list
*/ */
static void vmw_resource_relocations_free(struct list_head *list) static void vmw_resource_relocations_free(struct list_head *list)
{ {
struct vmw_resource_relocation *rel, *n; /* Memory is validation context memory, so no need to free it */
list_for_each_entry_safe(rel, n, list, head) { INIT_LIST_HEAD(list);
list_del(&rel->head);
kfree(rel);
}
} }
/** /**
@@ -531,68 +559,6 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0; return 0;
} }
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
* @validate_as_mob: Validate this buffer as a MOB.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
* Returns -EINVAL if the limit of number of buffer objects per command
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
uint32_t val_node;
struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
struct drm_hash_item *hash;
int ret;
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
DRM_ERROR("Inconsistent buffer usage.\n");
return -EINVAL;
}
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
val_node = sw_context->cur_val_buf;
if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
DRM_ERROR("Max number of DMA buffers per submission "
"exceeded.\n");
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
vval_buf->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
"entry.\n");
return ret;
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(&vbo->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
}
if (p_val_node)
*p_val_node = val_node;
return 0;
}
/** /**
* vmw_resources_reserve - Reserve all resources on the sw_context's * vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list. * resource list.
@@ -605,27 +571,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
*/ */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context) static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{ {
struct vmw_resource_val_node *val; int ret;
int ret = 0;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
if (res->backup) {
struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0)) ret = vmw_validation_res_reserve(sw_context->ctx, true);
if (ret)
return ret; return ret;
}
}
if (sw_context->dx_query_mob) { if (sw_context->dx_query_mob) {
struct vmw_buffer_object *expected_dx_query_mob; struct vmw_buffer_object *expected_dx_query_mob;
@@ -641,87 +591,6 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret; return ret;
} }
/**
* vmw_resources_validate - Validate all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Before this function is called, all resource backup buffers must have
* been validated.
*/
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
struct vmw_buffer_object *backup = res->backup;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
/* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) {
struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
if (ret) {
ttm_bo_unreserve(&vbo->base);
return ret;
}
}
}
return 0;
}
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
* used for this resource is returned here.
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
{
int ret;
struct vmw_resource_val_node *node;
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
vmw_ptr_diff(sw_context->buf_start,
id_loc),
vmw_res_rel_normal);
if (unlikely(ret != 0))
return ret;
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
return ret;
if (p_val)
*p_val = node;
return 0;
}
/** /**
* vmw_cmd_res_check - Check that a resource is present and if so, put it * vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there. * on the resource validate list unless it's already there.
@@ -741,17 +610,16 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
enum vmw_res_type res_type, enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter, const struct vmw_user_resource_conv *converter,
uint32_t *id_loc, uint32_t *id_loc,
struct vmw_resource_val_node **p_val) struct vmw_resource **p_res)
{ {
struct vmw_res_cache_entry *rcache = struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
&sw_context->res_cache[res_type];
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_resource_val_node *node;
int ret; int ret;
if (p_res)
*p_res = NULL;
if (*id_loc == SVGA3D_INVALID_ID) { if (*id_loc == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) { if (res_type == vmw_res_context) {
DRM_ERROR("Illegal context invalid id.\n"); DRM_ERROR("Illegal context invalid id.\n");
return -EINVAL; return -EINVAL;
@@ -759,56 +627,41 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return 0; return 0;
} }
/* if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
* Fastpath in case of repeated commands referencing the same res = rcache->res;
* resource } else {
*/ unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
if (likely(rcache->valid && *id_loc == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false; ret = vmw_validation_preload_res(sw_context->ctx, size);
if (p_val) if (ret)
*p_val = rcache->node; return ret;
return vmw_resource_relocation_add res = vmw_user_resource_noref_lookup_handle
(&sw_context->res_relocations, res, (dev_priv, sw_context->fp->tfile, *id_loc, converter);
vmw_ptr_diff(sw_context->buf_start, id_loc), if (unlikely(IS_ERR(res))) {
vmw_res_rel_normal); DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned int) *id_loc);
return PTR_ERR(res);
} }
ret = vmw_user_resource_lookup_handle(dev_priv, ret = vmw_execbuf_res_noref_val_add(sw_context, res);
sw_context->fp->tfile, if (unlikely(ret != 0))
*id_loc,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) *id_loc);
dump_stack();
return ret; return ret;
}
rcache->valid = true; if (rcache->valid && rcache->res == res) {
rcache->res = res; rcache->valid_handle = true;
rcache->handle = *id_loc; rcache->handle = *id_loc;
}
}
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc, ret = vmw_resource_relocation_add(sw_context, res,
res, &node); vmw_ptr_diff(sw_context->buf_start,
if (unlikely(ret != 0)) id_loc),
goto out_no_reloc; vmw_res_rel_normal);
if (p_res)
*p_res = res;
rcache->node = node;
if (p_val)
*p_val = node;
vmw_resource_unreference(&res);
return 0; return 0;
out_no_reloc:
BUG_ON(sw_context->error_resource != NULL);
sw_context->error_resource = res;
return ret;
} }
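A caller-side sketch (not part of the patch): the checker now returns an unreferenced resource pointer through @p_res that stays valid for the rest of the validation, so the old lookup/use/vmw_resource_unreference() dance disappears from the command checkers.

	static int example_check_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 uint32_t *sid_loc)
	{
		struct vmw_resource *res;
		int ret;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter, sid_loc, &res);
		if (ret)
			return ret;

		/* No vmw_resource_unreference() here: the validation context
		 * holds the reference until the submission is finished. */
		return 0;
	}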
/** /**
@@ -861,22 +714,18 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
*/ */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{ {
struct vmw_resource_val_node *val; struct vmw_ctx_validation_info *val;
int ret; int ret;
list_for_each_entry(val, &sw_context->resource_list, head) { list_for_each_entry(val, &sw_context->ctx_list, head) {
if (unlikely(!val->staged_bindings)) ret = vmw_binding_rebind_all(val->cur);
break;
ret = vmw_binding_rebind_all
(vmw_context_binding_state(val->res));
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n"); DRM_ERROR("Failed to rebind context.\n");
return ret; return ret;
} }
ret = vmw_rebind_all_dx_query(val->res); ret = vmw_rebind_all_dx_query(val->ctx);
if (ret != 0) if (ret != 0)
return ret; return ret;
} }
@@ -903,45 +752,33 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
uint32 view_ids[], u32 num_views, uint32 view_ids[], u32 num_views,
u32 first_slot) u32 first_slot)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_cmdbuf_res_manager *man;
u32 i; u32 i;
int ret;
if (!ctx_node) { if (!ctx_node) {
DRM_ERROR("DX Context not set.\n"); DRM_ERROR("DX Context not set.\n");
return -EINVAL; return -EINVAL;
} }
man = sw_context->man;
for (i = 0; i < num_views; ++i) { for (i = 0; i < num_views; ++i) {
struct vmw_ctx_bindinfo_view binding; struct vmw_ctx_bindinfo_view binding;
struct vmw_resource *view = NULL; struct vmw_resource *view = NULL;
if (view_ids[i] != SVGA3D_INVALID_ID) { if (view_ids[i] != SVGA3D_INVALID_ID) {
view = vmw_view_lookup(man, view_type, view_ids[i]); view = vmw_view_id_val_add(sw_context, view_type,
view_ids[i]);
if (IS_ERR(view)) { if (IS_ERR(view)) {
DRM_ERROR("View not found.\n"); DRM_ERROR("View not found.\n");
return PTR_ERR(view); return PTR_ERR(view);
} }
ret = vmw_view_res_val_add(sw_context, view);
if (ret) {
DRM_ERROR("Could not add view to "
"validation list.\n");
vmw_resource_unreference(&view);
return ret;
}
} }
binding.bi.ctx = ctx_node->res; binding.bi.ctx = ctx_node->ctx;
binding.bi.res = view; binding.bi.res = view;
binding.bi.bt = binding_type; binding.bi.bt = binding_type;
binding.shader_slot = shader_slot; binding.shader_slot = shader_slot;
binding.slot = first_slot + i; binding.slot = first_slot + i;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi,
shader_slot, binding.slot); shader_slot, binding.slot);
if (view)
vmw_resource_unreference(&view);
} }
return 0; return 0;
@@ -971,6 +808,34 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
user_context_converter, &cmd->cid, NULL); user_context_converter, &cmd->cid, NULL);
} }
/**
* vmw_execbuf_info_from_res - Get the private validation metadata for a
* recently validated resource
* @sw_context: Pointer to the command submission context
* @res: The resource
*
* The resource pointed to by @res needs to be present in the command submission
* context's resource cache and hence the last resource of that type to be
* processed by the validation code.
*
* Return: a pointer to the private metadata of the resource, or NULL
* if it wasn't found
*/
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
struct vmw_resource *res)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[vmw_res_type(res)];
if (rcache->valid && rcache->res == res)
return rcache->private;
WARN_ON_ONCE(true);
return NULL;
}
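A sketch of the intended pairing and its ordering constraint (not in the patch): the metadata lookup only succeeds while the context is still the cached vmw_res_context entry, i.e. no other context check may run in between.

	static int example_get_ctx_info(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					uint32_t *cid_loc)
	{
		struct vmw_ctx_validation_info *node;
		struct vmw_resource *ctx;
		int ret;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter, cid_loc, &ctx);
		if (ret)
			return ret;

		/* Works only because @ctx is the most recently checked context. */
		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		/* node->staged is the binding-state tracker to add bindings to. */
		return 0;
	}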
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
@@ -979,8 +844,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body; SVGA3dCmdSetRenderTarget body;
} *cmd; } *cmd;
struct vmw_resource_val_node *ctx_node; struct vmw_resource *ctx;
struct vmw_resource_val_node *res_node; struct vmw_resource *res;
int ret; int ret;
cmd = container_of(header, struct vmw_sid_cmd, header); cmd = container_of(header, struct vmw_sid_cmd, header);
@@ -993,25 +858,29 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid, user_context_converter, &cmd->body.cid,
&ctx_node); &ctx);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter, &cmd->body.target.sid,
&cmd->body.target.sid, &res_node); &res);
if (unlikely(ret != 0)) if (unlikely(ret))
return ret; return ret;
if (dev_priv->has_mob) { if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_view binding; struct vmw_ctx_bindinfo_view binding;
struct vmw_ctx_validation_info *node;
binding.bi.ctx = ctx_node->res; node = vmw_execbuf_info_from_res(sw_context, ctx);
binding.bi.res = res_node ? res_node->res : NULL; if (!node)
return -EINVAL;
binding.bi.ctx = ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_rt; binding.bi.bt = vmw_ctx_binding_rt;
binding.slot = cmd->body.type; binding.slot = cmd->body.type;
vmw_binding_add(ctx_node->staged_bindings, vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
&binding.bi, 0, binding.slot);
} }
return 0; return 0;
@@ -1171,17 +1040,17 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(sw_context->cur_query_bo != NULL)) { if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true; sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context, ret = vmw_validation_add_bo(sw_context->ctx,
sw_context->cur_query_bo, sw_context->cur_query_bo,
dev_priv->has_mob, NULL); dev_priv->has_mob, false);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
} }
sw_context->cur_query_bo = new_query_bo; sw_context->cur_query_bo = new_query_bo;
ret = vmw_bo_to_validate_list(sw_context, ret = vmw_validation_add_bo(sw_context->ctx,
dev_priv->dummy_query_bo, dev_priv->dummy_query_bo,
dev_priv->has_mob, NULL); dev_priv->has_mob, false);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
@@ -1269,7 +1138,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command batch validation. * @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated. * @id: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry * @vmw_bo_p: Points to a location that, on successful return will carry
* a reference-counted pointer to the DMA buffer identified by the * a non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id. * user-space handle in @id.
* *
* This function saves information needed to translate a user-space buffer * This function saves information needed to translate a user-space buffer
@@ -1284,40 +1153,34 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
SVGAMobId *id, SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p) struct vmw_buffer_object **vmw_bo_p)
{ {
struct vmw_buffer_object *vmw_bo = NULL; struct vmw_buffer_object *vmw_bo;
uint32_t handle = *id; uint32_t handle = *id;
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); vmw_validation_preload_bo(sw_context->ctx);
if (unlikely(ret != 0)) { vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use MOB buffer.\n"); DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL; return PTR_ERR(vmw_bo);
goto out_no_reloc;
} }
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
DRM_ERROR("Max number relocations per submission" vmw_user_bo_noref_release();
" exceeded\n"); if (unlikely(ret != 0))
ret = -EINVAL; return ret;
goto out_no_reloc;
}
reloc = &sw_context->relocs[sw_context->cur_reloc++]; reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
reloc->mob_loc = id; if (!reloc)
reloc->location = NULL; return -ENOMEM;
ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index); reloc->mob_loc = id;
if (unlikely(ret != 0)) reloc->vbo = vmw_bo;
goto out_no_reloc;
*vmw_bo_p = vmw_bo; *vmw_bo_p = vmw_bo;
return 0; list_add_tail(&reloc->head, &sw_context->bo_relocations);
out_no_reloc: return 0;
vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
} }
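A condensed sketch of the buffer-object lookup discipline introduced here (not part of the patch; the preload call presumably exists so no memory allocation has to happen while the unreferenced lookup is outstanding):

	static int example_translate_bo(struct vmw_sw_context *sw_context,
					uint32_t handle, bool as_mob,
					struct vmw_buffer_object **out)
	{
		struct vmw_buffer_object *vbo;
		int ret;

		vmw_validation_preload_bo(sw_context->ctx);
		vbo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
		if (IS_ERR(vbo))
			return PTR_ERR(vbo);

		ret = vmw_validation_add_bo(sw_context->ctx, vbo, as_mob, false);
		vmw_user_bo_noref_release();
		if (ret)
			return ret;

		*out = vbo;	/* unreferenced; kept alive by the validation context */
		return 0;
	}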
/** /**
@@ -1328,7 +1191,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command batch validation. * @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated. * @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry * @vmw_bo_p: Points to a location that, on successful return will carry
* a reference-counted pointer to the DMA buffer identified by the * a non-reference-counted pointer to the DMA buffer identified by the
* user-space handle in @id. * user-space handle in @id.
* *
* This function saves information needed to translate a user-space buffer * This function saves information needed to translate a user-space buffer
@@ -1344,39 +1207,33 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
SVGAGuestPtr *ptr, SVGAGuestPtr *ptr,
struct vmw_buffer_object **vmw_bo_p) struct vmw_buffer_object **vmw_bo_p)
{ {
struct vmw_buffer_object *vmw_bo = NULL; struct vmw_buffer_object *vmw_bo;
uint32_t handle = ptr->gmrId; uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); vmw_validation_preload_bo(sw_context->ctx);
if (unlikely(ret != 0)) { vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use GMR region.\n"); DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL; return PTR_ERR(vmw_bo);
goto out_no_reloc;
} }
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
DRM_ERROR("Max number relocations per submission" vmw_user_bo_noref_release();
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_reloc; return ret;
reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
if (!reloc)
return -ENOMEM;
reloc->location = ptr;
reloc->vbo = vmw_bo;
*vmw_bo_p = vmw_bo; *vmw_bo_p = vmw_bo;
return 0; list_add_tail(&reloc->head, &sw_context->bo_relocations);
out_no_reloc: return 0;
vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
} }
@@ -1400,7 +1257,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
} *cmd; } *cmd;
int ret; int ret;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *cotable_res; struct vmw_resource *cotable_res;
@@ -1415,9 +1272,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
cmd->q.type >= SVGA3D_QUERYTYPE_MAX) cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL; return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY); cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
ret = vmw_cotable_notify(cotable_res, cmd->q.queryId); ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
vmw_resource_unreference(&cotable_res);
return ret; return ret;
} }
@@ -1462,11 +1318,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return ret; return ret;
sw_context->dx_query_mob = vmw_bo; sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res; sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
return 0;
vmw_bo_unreference(&vmw_bo);
return ret;
} }
@@ -1567,7 +1420,6 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
@@ -1621,7 +1473,6 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
@@ -1654,7 +1505,6 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_bo_unreference(&vmw_bo);
return 0; return 0;
} }
@@ -1706,7 +1556,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_bo_unreference(&vmw_bo);
return 0; return 0;
} }
@@ -1757,7 +1606,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS)) if (unlikely(ret != -ERESTARTSYS))
DRM_ERROR("could not find surface for DMA.\n"); DRM_ERROR("could not find surface for DMA.\n");
goto out_no_surface; return ret;
} }
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
@@ -1765,9 +1614,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
header); header);
out_no_surface: return 0;
vmw_bo_unreference(&vmw_bo);
return ret;
} }
static int vmw_cmd_draw(struct vmw_private *dev_priv, static int vmw_cmd_draw(struct vmw_private *dev_priv,
@@ -1837,8 +1684,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
((unsigned long) header + header->size + sizeof(header)); ((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *) SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
struct vmw_resource_val_node *ctx_node; struct vmw_resource *ctx;
struct vmw_resource_val_node *res_node; struct vmw_resource *res;
int ret; int ret;
cmd = container_of(header, struct vmw_tex_state_cmd, cmd = container_of(header, struct vmw_tex_state_cmd,
@@ -1846,7 +1693,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->state.cid, user_context_converter, &cmd->state.cid,
&ctx_node); &ctx);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
@@ -1862,19 +1709,24 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter,
&cur_state->value, &res_node); &cur_state->value, &res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
if (dev_priv->has_mob) { if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_tex binding; struct vmw_ctx_bindinfo_tex binding;
struct vmw_ctx_validation_info *node;
node = vmw_execbuf_info_from_res(sw_context, ctx);
if (!node)
return -EINVAL;
binding.bi.ctx = ctx_node->res; binding.bi.ctx = ctx;
binding.bi.res = res_node ? res_node->res : NULL; binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_tex; binding.bi.bt = vmw_ctx_binding_tex;
binding.texture_stage = cur_state->stage; binding.texture_stage = cur_state->stage;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, vmw_binding_add(node->staged, &binding.bi, 0,
0, binding.texture_stage); binding.texture_stage);
} }
} }
@@ -1893,14 +1745,9 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body; SVGAFifoCmdDefineGMRFB body;
} *cmd = buf; } *cmd = buf;
ret = vmw_translate_guest_ptr(dev_priv, sw_context, return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr, &cmd->body.ptr,
&vmw_bo); &vmw_bo);
if (unlikely(ret != 0))
return ret;
vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
@@ -1922,25 +1769,24 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
*/ */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
struct vmw_resource_val_node *val_node, struct vmw_resource *res,
uint32_t *buf_id, uint32_t *buf_id,
unsigned long backup_offset) unsigned long backup_offset)
{ {
struct vmw_buffer_object *dma_buf; struct vmw_buffer_object *vbo;
void *info;
int ret; int ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); info = vmw_execbuf_info_from_res(sw_context, res);
if (!info)
return -EINVAL;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
if (ret) if (ret)
return ret; return ret;
val_node->switching_backup = true; vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
if (val_node->first_usage) backup_offset);
val_node->no_buffer_needed = true;
vmw_bo_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset;
return 0; return 0;
} }
@@ -1970,15 +1816,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id, uint32_t *buf_id,
unsigned long backup_offset) unsigned long backup_offset)
{ {
struct vmw_resource_val_node *val_node; struct vmw_resource *res;
int ret; int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
converter, res_id, &val_node); converter, res_id, &res);
if (ret) if (ret)
return ret; return ret;
return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node, return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
buf_id, backup_offset); buf_id, backup_offset);
} }
@@ -2170,14 +2016,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
} *cmd; } *cmd;
int ret; int ret;
size_t size; size_t size;
struct vmw_resource_val_node *val; struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_define_cmd, cmd = container_of(header, struct vmw_shader_define_cmd,
header); header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid, user_context_converter, &cmd->body.cid,
&val); &ctx);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
@@ -2186,14 +2032,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
size = cmd->header.size - sizeof(cmd->body); size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(dev_priv, ret = vmw_compat_shader_add(dev_priv,
vmw_context_res_man(val->res), vmw_context_res_man(ctx),
cmd->body.shid, cmd + 1, cmd->body.shid, cmd + 1,
cmd->body.type, size, cmd->body.type, size,
&sw_context->staged_cmd_res); &sw_context->staged_cmd_res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations, return vmw_resource_relocation_add(sw_context,
NULL, NULL,
vmw_ptr_diff(sw_context->buf_start, vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id), &cmd->header.id),
@@ -2217,28 +2063,28 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
SVGA3dCmdDestroyShader body; SVGA3dCmdDestroyShader body;
} *cmd; } *cmd;
int ret; int ret;
struct vmw_resource_val_node *val; struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_destroy_cmd, cmd = container_of(header, struct vmw_shader_destroy_cmd,
header); header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid, user_context_converter, &cmd->body.cid,
&val); &ctx);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
if (unlikely(!dev_priv->has_mob)) if (unlikely(!dev_priv->has_mob))
return 0; return 0;
ret = vmw_shader_remove(vmw_context_res_man(val->res), ret = vmw_shader_remove(vmw_context_res_man(ctx),
cmd->body.shid, cmd->body.shid,
cmd->body.type, cmd->body.type,
&sw_context->staged_cmd_res); &sw_context->staged_cmd_res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations, return vmw_resource_relocation_add(sw_context,
NULL, NULL,
vmw_ptr_diff(sw_context->buf_start, vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id), &cmd->header.id),
@@ -2261,9 +2107,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdSetShader body; SVGA3dCmdSetShader body;
} *cmd; } *cmd;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo_shader binding; struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *res = NULL; struct vmw_resource *ctx, *res = NULL;
struct vmw_ctx_validation_info *ctx_info;
int ret; int ret;
cmd = container_of(header, struct vmw_set_shader_cmd, cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -2277,7 +2123,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid, user_context_converter, &cmd->body.cid,
&ctx_node); &ctx);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
@@ -2285,34 +2131,35 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0; return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) { if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, cmd->body.shid,
cmd->body.type); cmd->body.type);
if (!IS_ERR(res)) { if (!IS_ERR(res)) {
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
} }
} }
if (!res_node) { if (IS_ERR_OR_NULL(res)) {
ret = vmw_cmd_res_check(dev_priv, sw_context, ret = vmw_cmd_res_check(dev_priv, sw_context,
vmw_res_shader, vmw_res_shader,
user_shader_converter, user_shader_converter,
&cmd->body.shid, &res_node); &cmd->body.shid, &res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
} }
binding.bi.ctx = ctx_node->res; ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
binding.bi.res = res_node ? res_node->res : NULL; if (!ctx_info)
return -EINVAL;
binding.bi.ctx = ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader; binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, vmw_binding_add(ctx_info->staged, &binding.bi,
binding.shader_slot, 0); binding.shader_slot, 0);
return 0; return 0;
} }
@@ -2393,8 +2240,8 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdDXSetSingleConstantBuffer body; SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd; } *cmd;
struct vmw_resource_val_node *res_node = NULL; struct vmw_resource *res = NULL;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_cb binding; struct vmw_ctx_bindinfo_cb binding;
int ret; int ret;
@@ -2406,12 +2253,12 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header); cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter,
&cmd->body.sid, &res_node); &cmd->body.sid, &res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
binding.bi.ctx = ctx_node->res; binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res_node ? res_node->res : NULL; binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb; binding.bi.bt = vmw_ctx_binding_cb;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
binding.offset = cmd->body.offsetInBytes; binding.offset = cmd->body.offsetInBytes;
@@ -2426,7 +2273,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
return -EINVAL; return -EINVAL;
} }
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, binding.slot); binding.shader_slot, binding.slot);
return 0; return 0;
@@ -2482,7 +2329,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdDXSetShader body; SVGA3dCmdDXSetShader body;
} *cmd; } *cmd;
struct vmw_resource *res = NULL; struct vmw_resource *res = NULL;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_shader binding; struct vmw_ctx_bindinfo_shader binding;
int ret = 0; int ret = 0;
@@ -2506,23 +2353,20 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
return PTR_ERR(res); return PTR_ERR(res);
} }
ret = vmw_resource_val_add(sw_context, res, NULL); ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (ret) if (ret)
goto out_unref; return ret;
} }
binding.bi.ctx = ctx_node->res; binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res; binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_dx_shader; binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, 0); binding.shader_slot, 0);
out_unref:
if (res)
vmw_resource_unreference(&res);
return ret; return 0;
} }
/** /**
@@ -2537,9 +2381,9 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_vb binding; struct vmw_ctx_bindinfo_vb binding;
struct vmw_resource_val_node *res_node; struct vmw_resource *res;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdDXSetVertexBuffers body; SVGA3dCmdDXSetVertexBuffers body;
@@ -2564,18 +2408,18 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter,
&cmd->buf[i].sid, &res_node); &cmd->buf[i].sid, &res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
binding.bi.ctx = ctx_node->res; binding.bi.ctx = ctx_node->ctx;
binding.bi.bt = vmw_ctx_binding_vb; binding.bi.bt = vmw_ctx_binding_vb;
binding.bi.res = ((res_node) ? res_node->res : NULL); binding.bi.res = res;
binding.offset = cmd->buf[i].offset; binding.offset = cmd->buf[i].offset;
binding.stride = cmd->buf[i].stride; binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer; binding.slot = i + cmd->body.startBuffer;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot); 0, binding.slot);
} }
@@ -2594,9 +2438,9 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_ib binding; struct vmw_ctx_bindinfo_ib binding;
struct vmw_resource_val_node *res_node; struct vmw_resource *res;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdDXSetIndexBuffer body; SVGA3dCmdDXSetIndexBuffer body;
@@ -2611,17 +2455,17 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header); cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter,
&cmd->body.sid, &res_node); &cmd->body.sid, &res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
binding.bi.ctx = ctx_node->res; binding.bi.ctx = ctx_node->ctx;
binding.bi.res = ((res_node) ? res_node->res : NULL); binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_ib; binding.bi.bt = vmw_ctx_binding_ib;
binding.offset = cmd->body.offset; binding.offset = cmd->body.offset;
binding.format = cmd->body.format; binding.format = cmd->body.format;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0); vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
return 0; return 0;
} }
@@ -2679,8 +2523,8 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
SVGA3dCmdDXClearRenderTargetView body; SVGA3dCmdDXClearRenderTargetView body;
} *cmd = container_of(header, typeof(*cmd), header); } *cmd = container_of(header, typeof(*cmd), header);
return vmw_view_id_val_add(sw_context, vmw_view_rt, return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
cmd->body.renderTargetViewId); cmd->body.renderTargetViewId));
} }
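For reference, a sketch of what PTR_RET() does in these checkers (not in the patch): it collapses the pointer-or-ERR_PTR return of vmw_view_id_val_add() back into the zero-or-negative-errno value a checker must return, i.e. it behaves like PTR_ERR_OR_ZERO().

	static int example_clear_view_check(struct vmw_sw_context *sw_context,
					    u32 view_id)
	{
		struct vmw_resource *view =
			vmw_view_id_val_add(sw_context, vmw_view_rt, view_id);

		return IS_ERR(view) ? PTR_ERR(view) : 0;	/* == PTR_RET(view) */
	}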
/** /**
@@ -2700,16 +2544,16 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
SVGA3dCmdDXClearDepthStencilView body; SVGA3dCmdDXClearDepthStencilView body;
} *cmd = container_of(header, typeof(*cmd), header); } *cmd = container_of(header, typeof(*cmd), header);
return vmw_view_id_val_add(sw_context, vmw_view_ds, return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
cmd->body.depthStencilViewId); cmd->body.depthStencilViewId));
} }
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource_val_node *srf_node; struct vmw_resource *srf;
struct vmw_resource *res; struct vmw_resource *res;
enum vmw_view_type view_type; enum vmw_view_type view_type;
int ret; int ret;
@@ -2734,19 +2578,18 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header); cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter,
&cmd->sid, &srf_node); &cmd->sid, &srf);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]); res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
ret = vmw_cotable_notify(res, cmd->defined_id); ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
return vmw_view_add(sw_context->man, return vmw_view_add(sw_context->man,
ctx_node->res, ctx_node->ctx,
srf_node->res, srf,
view_type, view_type,
cmd->defined_id, cmd->defined_id,
header, header,
@@ -2766,9 +2609,9 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_so binding; struct vmw_ctx_bindinfo_so binding;
struct vmw_resource_val_node *res_node; struct vmw_resource *res;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdDXSetSOTargets body; SVGA3dCmdDXSetSOTargets body;
@@ -2793,18 +2636,18 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter,
&cmd->targets[i].sid, &res_node); &cmd->targets[i].sid, &res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
binding.bi.ctx = ctx_node->res; binding.bi.ctx = ctx_node->ctx;
binding.bi.res = ((res_node) ? res_node->res : NULL); binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_so, binding.bi.bt = vmw_ctx_binding_so,
binding.offset = cmd->targets[i].offset; binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes; binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i; binding.slot = i;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot); 0, binding.slot);
} }
@@ -2815,7 +2658,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res; struct vmw_resource *res;
/* /*
* This is based on the fact that all affected define commands have * This is based on the fact that all affected define commands have
@@ -2834,10 +2677,9 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
} }
so_type = vmw_so_cmd_to_type(header->id); so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]); res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
cmd = container_of(header, typeof(*cmd), header); cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id); ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
return ret; return ret;
} }
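The vmw_resource_unreference() calls dropped around vmw_context_cotable() in this and the earlier hunks suggest the cotable pointer is now returned unreferenced and is kept alive by the owning context; a sketch of the resulting pattern (an assumption, not stated in the patch):

	static int example_notify_cotable(struct vmw_ctx_validation_info *ctx_node,
					  u32 defined_id)
	{
		struct vmw_resource *res;

		res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);

		/* No unreference: the cotable lives as long as its context. */
		return vmw_cotable_notify(res, defined_id);
	}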
@@ -2882,7 +2724,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
if (unlikely(ctx_node == NULL)) { if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n"); DRM_ERROR("DX Context not set.\n");
@@ -2907,7 +2749,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
union vmw_view_destroy body; union vmw_view_destroy body;
@@ -2934,7 +2776,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
* relocation to conditionally make this command a NOP to avoid * relocation to conditionally make this command a NOP to avoid
* device errors. * device errors.
*/ */
return vmw_resource_relocation_add(&sw_context->res_relocations, return vmw_resource_relocation_add(sw_context,
view, view,
vmw_ptr_diff(sw_context->buf_start, vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id), &cmd->header.id),
@@ -2953,7 +2795,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res; struct vmw_resource *res;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
@@ -2966,13 +2808,12 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
return -EINVAL; return -EINVAL;
} }
res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER); res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId); ret = vmw_cotable_notify(res, cmd->body.shaderId);
vmw_resource_unreference(&res);
if (ret) if (ret)
return ret; return ret;
return vmw_dx_shader_add(sw_context->man, ctx_node->res, return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
cmd->body.shaderId, cmd->body.type, cmd->body.shaderId, cmd->body.type,
&sw_context->staged_cmd_res); &sw_context->staged_cmd_res);
} }
...@@ -2989,7 +2830,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, ...@@ -2989,7 +2830,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyShader body; SVGA3dCmdDXDestroyShader body;
...@@ -3021,8 +2862,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, ...@@ -3021,8 +2862,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_resource_val_node *ctx_node; struct vmw_resource *ctx;
struct vmw_resource_val_node *res_node;
struct vmw_resource *res; struct vmw_resource *res;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
...@@ -3033,38 +2873,33 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, ...@@ -3033,38 +2873,33 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
if (cmd->body.cid != SVGA3D_INVALID_ID) { if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, user_context_converter,
&cmd->body.cid, &ctx_node); &cmd->body.cid, &ctx);
if (ret) if (ret)
return ret; return ret;
} else { } else {
ctx_node = sw_context->dx_ctx_node; if (!sw_context->dx_ctx_node) {
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n"); DRM_ERROR("DX Context not set.\n");
return -EINVAL; return -EINVAL;
} }
ctx = sw_context->dx_ctx_node->ctx;
} }
res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, 0); cmd->body.shid, 0);
if (IS_ERR(res)) { if (IS_ERR(res)) {
DRM_ERROR("Could not find shader to bind.\n"); DRM_ERROR("Could not find shader to bind.\n");
return PTR_ERR(res); return PTR_ERR(res);
} }
ret = vmw_resource_val_add(sw_context, res, &res_node); ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (ret) { if (ret) {
DRM_ERROR("Error creating resource validation node.\n"); DRM_ERROR("Error creating resource validation node.\n");
goto out_unref; return ret;
} }
return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
&cmd->body.mobid, &cmd->body.mobid,
cmd->body.offsetInBytes); cmd->body.offsetInBytes);
out_unref:
vmw_resource_unreference(&res);
return ret;
} }
/** /**
...@@ -3083,8 +2918,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, ...@@ -3083,8 +2918,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
SVGA3dCmdDXGenMips body; SVGA3dCmdDXGenMips body;
} *cmd = container_of(header, typeof(*cmd), header); } *cmd = container_of(header, typeof(*cmd), header);
return vmw_view_id_val_add(sw_context, vmw_view_sr, return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
cmd->body.shaderResourceViewId); cmd->body.shaderResourceViewId));
} }
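The genmips hook now funnels the result of vmw_view_id_val_add() through PTR_RET(), collapsing an error-pointer return into a plain integer error code. Below is a minimal, self-contained model of that pointer-encodes-error idiom; every name in it is an illustrative stand-in, not kernel code.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Error codes ride in the top MAX_ERRNO values of the address space, so a
 * single pointer can carry either a valid object or a negative errno. */
static void *err_ptr(long err)        { return (void *)(intptr_t)err; }
static int   is_err(const void *ptr)  { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *ptr) { return (long)(intptr_t)ptr; }

/* Collapse a maybe-error pointer to an int: 0 on success, -errno on failure. */
static int ptr_ret(const void *ptr)   { return is_err(ptr) ? (int)ptr_err(ptr) : 0; }

static int dummy_view;	/* stand-in for a real view object */

static void *view_lookup(int id)
{
	return (id < 0) ? err_ptr(-EINVAL) : &dummy_view;
}

int main(void)
{
	printf("valid view id -> %d\n", ptr_ret(view_lookup(3)));	/* 0 */
	printf("bad view id   -> %d\n", ptr_ret(view_lookup(-1)));	/* -22 (EINVAL) */
	return 0;
}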
/** /**
...@@ -3638,20 +3473,18 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv, ...@@ -3638,20 +3473,18 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
static void vmw_free_relocations(struct vmw_sw_context *sw_context) static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{ {
sw_context->cur_reloc = 0; /* Memory is validation context memory, so no need to free it */
INIT_LIST_HEAD(&sw_context->bo_relocations);
} }
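vmw_free_relocations() shrinks to a list re-init because, per the comment above, relocation nodes are now carved out of validation-context memory and go away when the whole context is released (presumably in the new vmwgfx_validation.c code). A minimal sketch of that allocate-from-context, free-all-at-once idea in plain userspace C, with hypothetical names:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Minimal arena: nodes are carved out of context-owned blocks and the whole
 * arena is torn down in one call, so individual nodes never need freeing. */
struct arena_block { struct arena_block *next; size_t used; char mem[4096]; };
struct arena { struct arena_block *blocks; };

static void *arena_alloc(struct arena *a, size_t size)
{
	struct arena_block *b = a->blocks;

	if (size > sizeof(b->mem))
		return NULL;			/* demo only handles small nodes */
	if (!b || b->used + size > sizeof(b->mem)) {
		b = calloc(1, sizeof(*b));
		if (!b)
			return NULL;
		b->next = a->blocks;
		a->blocks = b;
	}
	b->used += size;
	return b->mem + b->used - size;
}

static void arena_release(struct arena *a)
{
	while (a->blocks) {
		struct arena_block *next = a->blocks->next;

		free(a->blocks);
		a->blocks = next;
	}
}

int main(void)
{
	struct arena a = { 0 };
	char *s = arena_alloc(&a, 16);

	if (s) {
		strcpy(s, "relocation");
		printf("%s lives in the arena\n", s);
	}
	arena_release(&a);	/* frees every node at once */
	return 0;
}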
static void vmw_apply_relocations(struct vmw_sw_context *sw_context) static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{ {
uint32_t i;
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo; struct ttm_buffer_object *bo;
for (i = 0; i < sw_context->cur_reloc; ++i) { list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
reloc = &sw_context->relocs[i]; bo = &reloc->vbo->base;
validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
switch (bo->mem.mem_type) { switch (bo->mem.mem_type) {
case TTM_PL_VRAM: case TTM_PL_VRAM:
reloc->location->offset += bo->offset; reloc->location->offset += bo->offset;
...@@ -3670,110 +3503,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) ...@@ -3670,110 +3503,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
vmw_free_relocations(sw_context); vmw_free_relocations(sw_context);
} }
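The relocation walk above now iterates the intrusive bo_relocations list instead of indexing a fixed array, recovering each vmw_relocation from its embedded list head. The self-contained sketch below models that container_of()-style traversal with illustrative types and a hand-rolled singly linked list:

#include <stddef.h>
#include <stdio.h>

/* Recover the payload struct from a pointer to its embedded link. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };

struct reloc {
	unsigned int offset;
	struct list_node head;	/* embedded link, like vmw_relocation::head */
};

int main(void)
{
	struct reloc a = { .offset = 0x10 }, b = { .offset = 0x20 };
	struct list_node *list = &a.head, *it;

	a.head.next = &b.head;
	b.head.next = NULL;

	for (it = list; it; it = it->next) {
		struct reloc *r = container_of(it, struct reloc, head);

		printf("reloc patching offset 0x%x\n", r->offset);
	}
	return 0;
}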
/**
* vmw_resource_list_unrefererence - Free up a resource list and unreference
* all resources referenced by it.
*
* @list: The resource list.
*/
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
/*
* Drop references to resources held during command submission.
*/
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
if (val->staged_bindings) {
if (val->staged_bindings != sw_context->staged_bindings)
vmw_binding_state_free(val->staged_bindings);
else
sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
kfree(val);
}
}
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry, *next;
struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
base.head) {
list_del(&entry->base.head);
ttm_bo_unref(&entry->base.bo);
(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
list_for_each_entry(val, &sw_context->resource_list, head)
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob)
{
struct vmw_buffer_object *vbo =
container_of(bo, struct vmw_buffer_object, base);
struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
* start evicting GMRs to make room. If the DMA buffer can't be
* used as a GMR, this will return -ENOMEM.
*/
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
/**
* If that failed, try VRAM again, this time evicting
* previous contents.
*/
ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
return ret;
}
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry;
int ret;
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
true,
entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
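The buffer-validation helpers above move out of the execbuf code; their policy is spelled out by the removed comments: prefer VRAM-or-GMR, and only retry plain VRAM with eviction as a last resort, treating an interruption as fatal. A hedged, self-contained model of that fallback ordering, with try_place() standing in for ttm_bo_validate() and the placement lists:

#include <stdio.h>
#include <errno.h>

enum placement { PLACE_VRAM_OR_GMR, PLACE_VRAM_EVICT };

static int try_place(enum placement p, int vram_full, int gmrs_full)
{
	if (p == PLACE_VRAM_OR_GMR && vram_full && gmrs_full)
		return -ENOMEM;		/* no room without evicting */
	return 0;			/* eviction path always "fits" here */
}

static int validate_buffer(int vram_full, int gmrs_full)
{
	int ret = try_place(PLACE_VRAM_OR_GMR, vram_full, gmrs_full);

	/* Success, or interrupted by a signal: stop here, as the driver does. */
	if (ret == 0 || ret == -EINTR)
		return ret;

	/* Last resort: VRAM again, this time evicting previous contents. */
	return try_place(PLACE_VRAM_EVICT, vram_full, gmrs_full);
}

int main(void)
{
	printf("room available : %d\n", validate_buffer(0, 0));
	printf("fully committed: %d\n", validate_buffer(1, 1));
	return 0;
}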
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
uint32_t size) uint32_t size)
{ {
...@@ -3946,7 +3675,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, ...@@ -3946,7 +3675,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
if (sw_context->dx_ctx_node) if (sw_context->dx_ctx_node)
cmd = vmw_fifo_reserve_dx(dev_priv, command_size, cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
sw_context->dx_ctx_node->res->id); sw_context->dx_ctx_node->ctx->id);
else else
cmd = vmw_fifo_reserve(dev_priv, command_size); cmd = vmw_fifo_reserve(dev_priv, command_size);
if (!cmd) { if (!cmd) {
...@@ -3980,7 +3709,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, ...@@ -3980,7 +3709,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
u32 command_size, u32 command_size,
struct vmw_sw_context *sw_context) struct vmw_sw_context *sw_context)
{ {
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id : u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID); SVGA3D_INVALID_ID);
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
id, false, header); id, false, header);
...@@ -4057,31 +3786,35 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, ...@@ -4057,31 +3786,35 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
uint32_t handle) uint32_t handle)
{ {
struct vmw_resource_val_node *ctx_node;
struct vmw_resource *res; struct vmw_resource *res;
int ret; int ret;
unsigned int size;
if (handle == SVGA3D_INVALID_ID) if (handle == SVGA3D_INVALID_ID)
return 0; return 0;
ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile, size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
handle, user_context_converter, ret = vmw_validation_preload_res(sw_context->ctx, size);
&res); if (ret)
if (unlikely(ret != 0)) { return ret;
res = vmw_user_resource_noref_lookup_handle
(dev_priv, sw_context->fp->tfile, handle,
user_context_converter);
if (unlikely(IS_ERR(res))) {
DRM_ERROR("Could not find or user DX context 0x%08x.\n", DRM_ERROR("Could not find or user DX context 0x%08x.\n",
(unsigned) handle); (unsigned) handle);
return ret; return PTR_ERR(res);
} }
ret = vmw_resource_val_add(sw_context, res, &ctx_node); ret = vmw_execbuf_res_noref_val_add(sw_context, res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; return ret;
sw_context->dx_ctx_node = ctx_node; sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
sw_context->man = vmw_context_res_man(res); sw_context->man = vmw_context_res_man(res);
out_err:
vmw_resource_unreference(&res); return 0;
return ret;
} }
int vmw_execbuf_process(struct drm_file *file_priv, int vmw_execbuf_process(struct drm_file *file_priv,
...@@ -4097,15 +3830,12 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4097,15 +3830,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{ {
struct vmw_sw_context *sw_context = &dev_priv->ctx; struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL; struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
struct vmw_cmdbuf_header *header; struct vmw_cmdbuf_header *header;
struct ww_acquire_ctx ticket;
uint32_t handle; uint32_t handle;
int ret; int ret;
int32_t out_fence_fd = -1; int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL; struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC); out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
...@@ -4157,10 +3887,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4157,10 +3887,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true; sw_context->kernel = true;
sw_context->fp = vmw_fpriv(file_priv); sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0; INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_val_buf = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
INIT_LIST_HEAD(&sw_context->ctx_resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo; sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL; sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false; sw_context->needs_post_query_barrier = false;
...@@ -4168,8 +3895,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4168,8 +3895,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->dx_query_mob = NULL; sw_context->dx_query_mob = NULL;
sw_context->dx_query_ctx = NULL; sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations); INIT_LIST_HEAD(&sw_context->res_relocations);
INIT_LIST_HEAD(&sw_context->bo_relocations);
if (sw_context->staged_bindings) if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings); vmw_binding_state_reset(sw_context->staged_bindings);
...@@ -4180,24 +3907,13 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4180,24 +3907,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true; sw_context->res_ht_initialized = true;
} }
INIT_LIST_HEAD(&sw_context->staged_cmd_res); INIT_LIST_HEAD(&sw_context->staged_cmd_res);
INIT_LIST_HEAD(&resource_list); sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
if (unlikely(ret != 0)) { if (unlikely(ret != 0))
list_splice_init(&sw_context->ctx_resource_list,
&sw_context->resource_list);
goto out_err_nores; goto out_err_nores;
}
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size); command_size);
/*
* Merge the resource lists before checking the return status
* from vmd_cmd_check_all so that all the open hashtabs will
* be handled properly even if vmw_cmd_check_all fails.
*/
list_splice_init(&sw_context->ctx_resource_list,
&sw_context->resource_list);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err_nores; goto out_err_nores;
...@@ -4205,18 +3921,18 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4205,18 +3921,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err_nores; goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, ret = vmw_validation_bo_reserve(&val_ctx, true);
true, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err_nores; goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context); ret = vmw_validation_bo_validate(&val_ctx, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err;
ret = vmw_resources_validate(sw_context); ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err;
vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex); ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
...@@ -4255,17 +3971,16 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4255,17 +3971,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0) if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n"); DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resources_unreserve(sw_context, false); vmw_execbuf_bindings_commit(sw_context, false);
vmw_bind_dx_query_mob(sw_context);
vmw_validation_res_unreserve(&val_ctx, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, vmw_validation_bo_fence(sw_context->ctx, fence);
(void *) fence);
if (unlikely(dev_priv->pinned_bo != NULL && if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)) !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence); __vmw_execbuf_release_pinned_bo(dev_priv, fence);
vmw_clear_validations(sw_context);
/* /*
* If anything fails here, give up trying to export the fence * If anything fails here, give up trying to export the fence
* and do a sync since the user mode will not be able to sync * and do a sync since the user mode will not be able to sync
...@@ -4300,7 +4015,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4300,7 +4015,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
} }
list_splice_init(&sw_context->resource_list, &resource_list);
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
...@@ -4308,36 +4022,33 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4308,36 +4022,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* Unreference resources outside of the cmdbuf_mutex to * Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths. * avoid deadlocks in resource destruction paths.
*/ */
vmw_resource_list_unreference(sw_context, &resource_list); vmw_validation_unref_lists(&val_ctx);
return 0; return 0;
out_unlock_binding: out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex); mutex_unlock(&dev_priv->binding_mutex);
out_err: out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); vmw_validation_bo_backoff(&val_ctx);
out_err_nores: out_err_nores:
vmw_resources_unreserve(sw_context, true); vmw_execbuf_bindings_commit(sw_context, true);
vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations); vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context); vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL && if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)) !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL); __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock: out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
vmw_validation_drop_ht(&val_ctx);
WARN_ON(!list_empty(&sw_context->ctx_list));
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
/* /*
* Unreference resources outside of the cmdbuf_mutex to * Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths. * avoid deadlocks in resource destruction paths.
*/ */
vmw_resource_list_unreference(sw_context, &resource_list); vmw_validation_unref_lists(&val_ctx);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
out_free_header: out_free_header:
if (header) if (header)
vmw_cmdbuf_header_free(header); vmw_cmdbuf_header_free(header);
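Both exits above keep vmw_validation_unref_lists() after cmdbuf_mutex is dropped, per the comment about deadlocks in resource-destruction paths. A minimal model of why that ordering matters, under the (hypothetical, for illustration) assumption that dropping the last reference can run a destructor that takes the same lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t cmdbuf_lock = PTHREAD_MUTEX_INITIALIZER;

struct res { int refcount; };

static void res_destroy(struct res *r)
{
	pthread_mutex_lock(&cmdbuf_lock);	/* would deadlock if still held */
	printf("destroying resource\n");
	pthread_mutex_unlock(&cmdbuf_lock);
	free(r);
}

static void res_unref(struct res *r)
{
	if (--r->refcount == 0)
		res_destroy(r);
}

int main(void)
{
	struct res *r = malloc(sizeof(*r));

	if (!r)
		return 1;
	r->refcount = 1;

	pthread_mutex_lock(&cmdbuf_lock);
	/* ... command submission work under the lock ... */
	pthread_mutex_unlock(&cmdbuf_lock);

	res_unref(r);	/* only safe once the lock has been released */
	return 0;
}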
...@@ -4398,38 +4109,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ...@@ -4398,38 +4109,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence) struct vmw_fence_obj *fence)
{ {
int ret = 0; int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
struct vmw_fence_obj *lfence = NULL; struct vmw_fence_obj *lfence = NULL;
struct ww_acquire_ctx ticket; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
if (dev_priv->pinned_bo == NULL) if (dev_priv->pinned_bo == NULL)
goto out_unlock; goto out_unlock;
INIT_LIST_HEAD(&validate_list); ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
false);
pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base); if (ret)
pinned_val.shared = false; goto out_no_reserve;
list_add_tail(&pinned_val.head, &validate_list);
query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base); ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
query_val.shared = false; false);
list_add_tail(&query_val.head, &validate_list); if (ret)
goto out_no_reserve;
ret = ttm_eu_reserve_buffers(&ticket, &validate_list, ret = vmw_validation_bo_reserve(&val_ctx, false);
false, NULL); if (ret)
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve; goto out_no_reserve;
}
if (dev_priv->query_cid_valid) { if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL); BUG_ON(fence != NULL);
ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
if (unlikely(ret != 0)) { if (ret)
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_emit; goto out_no_emit;
}
dev_priv->query_cid_valid = false; dev_priv->query_cid_valid = false;
} }
...@@ -4443,22 +4147,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ...@@ -4443,22 +4147,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
NULL); NULL);
fence = lfence; fence = lfence;
} }
ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence); vmw_validation_bo_fence(&val_ctx, fence);
if (lfence != NULL) if (lfence != NULL)
vmw_fence_obj_unreference(&lfence); vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo); vmw_validation_unref_lists(&val_ctx);
ttm_bo_unref(&pinned_val.bo);
vmw_bo_unreference(&dev_priv->pinned_bo); vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock: out_unlock:
return; return;
out_no_emit: out_no_emit:
ttm_eu_backoff_reservation(&ticket, &validate_list); vmw_validation_bo_backoff(&val_ctx);
out_no_reserve: out_no_reserve:
ttm_bo_unref(&query_val.bo); vmw_validation_unref_lists(&val_ctx);
ttm_bo_unref(&pinned_val.bo); vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo); vmw_bo_unreference(&dev_priv->pinned_bo);
} }
/** /**
......
...@@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) ...@@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->cleanup_list); INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func); INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true; fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
TTM_OBJ_EXTRA_SIZE;
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
fman->event_fence_action_size = fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action)); ttm_round_pot(sizeof(struct vmw_event_fence_action));
...@@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv, ...@@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
} }
*p_fence = &ufence->fence; *p_fence = &ufence->fence;
*p_handle = ufence->base.hash.key; *p_handle = ufence->base.handle;
return 0; return 0;
out_err: out_err:
...@@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, ...@@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
"object.\n"); "object.\n");
goto out_no_ref_obj; goto out_no_ref_obj;
} }
handle = base->hash.key; handle = base->handle;
} }
ttm_base_object_unref(&base); ttm_base_object_unref(&base);
} }
......
...@@ -2575,88 +2575,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, ...@@ -2575,88 +2575,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
} }
/** /**
* vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before * vmw_kms_helper_validation_finish - Helper for post KMS command submission
* command submission. * cleanup and fencing
* * @dev_priv: Pointer to the device-private struct
* @dev_priv. Pointer to a device private structure. * @file_priv: Pointer identifying the client when user-space fencing is used
* @buf: The buffer object * @ctx: Pointer to the validation context
* @interruptible: Whether to perform waits as interruptible. * @out_fence: If non-NULL, returned refcounted fence-pointer
* @validate_as_mob: Whether the buffer should be validated as a MOB. If false, * @user_fence_rep: If non-NULL, pointer to user-space address area
* The buffer will be validated as a GMR. Already pinned buffers will not be * in which to copy user-space fence info
* validated. */
* void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
* Returns 0 on success, negative error code on failure, -ERESTARTSYS if
* interrupted by a signal.
*/
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit)
{
struct ttm_operation_ctx ctx = {
.interruptible = interruptible,
.no_wait_gpu = false};
struct ttm_buffer_object *bo = &buf->base;
int ret;
ttm_bo_reserve(bo, false, false, NULL);
if (for_cpu_blit)
ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
else
ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
validate_as_mob);
if (ret)
ttm_bo_unreserve(bo);
return ret;
}
/**
* vmw_kms_helper_buffer_revert - Undo the actions of
* vmw_kms_helper_buffer_prepare.
*
* @res: Pointer to the buffer object.
*
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_buffer_prepare.
*/
void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
{
if (buf)
ttm_bo_unreserve(&buf->base);
}
/**
* vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
* kms command submission.
*
* @dev_priv: Pointer to a device private structure.
* @file_priv: Pointer to a struct drm_file representing the caller's
* connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
* if non-NULL, @user_fence_rep must be non-NULL.
* @buf: The buffer object.
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here.
* @user_fence_rep: Optional pointer to a user-space provided struct
* drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
* function copies fence data to user-space in a fail-safe manner.
*/
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv, struct drm_file *file_priv,
struct vmw_buffer_object *buf, struct vmw_validation_context *ctx,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user * struct drm_vmw_fence_rep __user *
user_fence_rep) user_fence_rep)
{ {
struct vmw_fence_obj *fence; struct vmw_fence_obj *fence = NULL;
uint32_t handle; uint32_t handle;
int ret; int ret;
if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
out_fence)
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
file_priv ? &handle : NULL); file_priv ? &handle : NULL);
if (buf) vmw_validation_done(ctx, fence);
vmw_bo_fence_single(&buf->base, fence);
if (file_priv) if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence, ret, user_fence_rep, fence,
...@@ -2665,106 +2608,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, ...@@ -2665,106 +2608,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
*out_fence = fence; *out_fence = fence;
else else
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
vmw_kms_helper_buffer_revert(buf);
}
/**
* vmw_kms_helper_resource_revert - Undo the actions of
* vmw_kms_helper_resource_prepare.
*
* @res: Pointer to the resource. Typically a surface.
*
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_resource_prepare.
*/
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{
struct vmw_resource *res = ctx->res;
vmw_kms_helper_buffer_revert(ctx->buf);
vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
/**
* vmw_kms_helper_resource_prepare - Reserve and validate a resource before
* command submission.
*
* @res: Pointer to the resource. Typically a surface.
* @interruptible: Whether to perform waits as interruptible.
*
* Reserves and validates also the backup buffer if a guest-backed resource.
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted by a signal.
*/
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
bool interruptible,
struct vmw_validation_ctx *ctx)
{
int ret = 0;
ctx->buf = NULL;
ctx->res = res;
if (interruptible)
ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
else
mutex_lock(&res->dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
ret = vmw_resource_reserve(res, interruptible, false);
if (ret)
goto out_unlock;
if (res->backup) {
ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
interruptible,
res->dev_priv->has_mob,
false);
if (ret)
goto out_unreserve;
ctx->buf = vmw_bo_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
goto out_revert;
return 0;
out_revert:
vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve:
vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
return ret;
}
/**
* vmw_kms_helper_resource_finish - Unreserve and fence a resource after
* kms command submission.
*
* @res: Pointer to the resource. Typically a surface.
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here.
*/
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
struct vmw_fence_obj **out_fence)
{
struct vmw_resource *res = ctx->res;
if (ctx->buf || out_fence)
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
} }
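vmw_kms_helper_validation_finish() replaces the per-buffer and per-resource prepare/revert/finish helpers: callers populate a validation context, prepare it, perform the dirty or readback work, then fence and clean up through the single finish call; the screen-object and STDU hunks further down follow exactly this shape, unwinding with revert/unref labels on error. A compact, self-contained sketch of that control flow, where every function is a hypothetical stand-in rather than a vmwgfx API:

#include <stdio.h>

struct val_ctx { int prepared; };

static int  ctx_add_bo(struct val_ctx *c)  { (void)c; return 0; }
static int  ctx_prepare(struct val_ctx *c) { c->prepared = 1; return 0; }
static void ctx_revert(struct val_ctx *c)  { c->prepared = 0; }
static void ctx_finish(struct val_ctx *c)  { c->prepared = 0; } /* fence + release */
static void ctx_unref(struct val_ctx *c)   { (void)c; }
static int  do_dirty(void)                 { return 0; }

static int kms_dirty_op(void)
{
	struct val_ctx ctx = { 0 };
	int ret;

	ret = ctx_add_bo(&ctx);		/* populate the validation context */
	if (ret)
		return ret;

	ret = ctx_prepare(&ctx);	/* reserve + validate */
	if (ret)
		goto out_unref;

	ret = do_dirty();		/* the actual KMS work */
	if (ret)
		goto out_revert;

	ctx_finish(&ctx);		/* success path: fence and clean up */
	return 0;

out_revert:
	ctx_revert(&ctx);
out_unref:
	ctx_unref(&ctx);
	return ret;
}

int main(void)
{
	printf("dirty op -> %d\n", kms_dirty_op());
	return 0;
}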
/** /**
......
...@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, ...@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int increment, int increment,
struct vmw_kms_dirty *dirty); struct vmw_kms_dirty *dirty);
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit);
void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv, struct drm_file *file_priv,
struct vmw_buffer_object *buf, struct vmw_validation_context *ctx,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user * struct drm_vmw_fence_rep __user *
user_fence_rep); user_fence_rep);
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
bool interruptible,
struct vmw_validation_ctx *ctx);
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
struct vmw_fence_obj **out_fence);
int vmw_kms_readback(struct vmw_private *dev_priv, int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv, struct drm_file *file_priv,
struct vmw_framebuffer *vfb, struct vmw_framebuffer *vfb,
......
...@@ -31,8 +31,8 @@ ...@@ -31,8 +31,8 @@
*/ */
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
#include "ttm_object.h"
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <drm/ttm/ttm_object.h>
/* /*
* DMA-BUF attach- and mapping methods. No need to implement * DMA-BUF attach- and mapping methods. No need to implement
......
...@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res) ...@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv; struct vmw_private *dev_priv = res->dev_priv;
struct idr *idr = &dev_priv->res_idr[res->func->res_type]; struct idr *idr = &dev_priv->res_idr[res->func->res_type];
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
if (res->id != -1) if (res->id != -1)
idr_remove(idr, res->id); idr_remove(idr, res->id);
res->id = -1; res->id = -1;
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
} }
static void vmw_resource_release(struct kref *kref) static void vmw_resource_release(struct kref *kref)
...@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref) ...@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
int id; int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type]; struct idr *idr = &dev_priv->res_idr[res->func->res_type];
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
res->avail = false;
list_del_init(&res->lru_head); list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
if (res->backup) { if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base; struct ttm_buffer_object *bo = &res->backup->base;
...@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref) ...@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
else else
kfree(res); kfree(res);
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
if (id != -1) if (id != -1)
idr_remove(idr, id); idr_remove(idr, id);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
} }
void vmw_resource_unreference(struct vmw_resource **p_res) void vmw_resource_unreference(struct vmw_resource **p_res)
...@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res) ...@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
BUG_ON(res->id != -1); BUG_ON(res->id != -1);
idr_preload(GFP_KERNEL); idr_preload(GFP_KERNEL);
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT); ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
if (ret >= 0) if (ret >= 0)
res->id = ret; res->id = ret;
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
idr_preload_end(); idr_preload_end();
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
} }
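resource_lock becomes a spinlock here, and id allocation keeps the preload-outside/commit-inside split shown above (idr_preload() before taking the lock, GFP_NOWAIT inside it), so nothing sleeps or allocates while the lock is held. A rough userspace model of that pattern, with illustrative names and a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct id_node { int id; struct id_node *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct id_node *table;
static int next_id = 1;

static int alloc_id(void)
{
	/* May block or sleep: done before taking the lock. */
	struct id_node *node = malloc(sizeof(*node));
	int id;

	if (!node)
		return -1;

	pthread_mutex_lock(&table_lock);
	node->id = next_id++;		/* no allocation under the lock */
	node->next = table;
	table = node;
	id = node->id;
	pthread_mutex_unlock(&table_lock);

	return id;
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	printf("allocated ids %d and %d\n", a, b);
	return 0;	/* demo leaks the nodes; a real table would free them */
}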
...@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, ...@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
kref_init(&res->kref); kref_init(&res->kref);
res->hw_destroy = NULL; res->hw_destroy = NULL;
res->res_free = res_free; res->res_free = res_free;
res->avail = false;
res->dev_priv = dev_priv; res->dev_priv = dev_priv;
res->func = func; res->func = func;
INIT_LIST_HEAD(&res->lru_head); INIT_LIST_HEAD(&res->lru_head);
...@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, ...@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
return vmw_resource_alloc_id(res); return vmw_resource_alloc_id(res);
} }
/**
* vmw_resource_activate
*
* @res: Pointer to the newly created resource
* @hw_destroy: Destroy function. NULL if none.
*
* Activate a resource after the hardware has been made aware of it.
* Set tye destroy function to @destroy. Typically this frees the
* resource and destroys the hardware resources associated with it.
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
void vmw_resource_activate(struct vmw_resource *res,
void (*hw_destroy) (struct vmw_resource *))
{
struct vmw_private *dev_priv = res->dev_priv;
write_lock(&dev_priv->resource_lock);
res->avail = true;
res->hw_destroy = hw_destroy;
write_unlock(&dev_priv->resource_lock);
}
/** /**
* vmw_user_resource_lookup_handle - lookup a struct resource from a * vmw_user_resource_lookup_handle - lookup a struct resource from a
...@@ -243,15 +219,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, ...@@ -243,15 +219,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource; goto out_bad_resource;
res = converter->base_obj_to_res(base); res = converter->base_obj_to_res(base);
read_lock(&dev_priv->resource_lock);
if (!res->avail || res->res_free != converter->res_free) {
read_unlock(&dev_priv->resource_lock);
goto out_bad_resource;
}
kref_get(&res->kref); kref_get(&res->kref);
read_unlock(&dev_priv->resource_lock);
*p_res = res; *p_res = res;
ret = 0; ret = 0;
...@@ -262,6 +230,41 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, ...@@ -262,6 +230,41 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
return ret; return ret;
} }
/**
* vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
* TTM user-space handle and perform basic type checks
*
* @dev_priv: Pointer to a device private struct
* @tfile: Pointer to a struct ttm_object_file identifying the caller
* @handle: The TTM user-space handle
* @converter: Pointer to an object describing the resource type
*
* Unlike vmw_user_resource_lookup_handle(), this function does not take an
* extra reference on the returned resource.
*
* Return: The resource pointer on success, ERR_PTR(-ESRCH) if the handle
* can't be found, or ERR_PTR(-EINVAL) if it is associated with an incorrect
* resource type.
*/
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv
*converter)
{
struct ttm_base_object *base;
base = ttm_base_object_noref_lookup(tfile, handle);
if (!base)
return ERR_PTR(-ESRCH);
if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
ttm_base_object_noref_release();
return ERR_PTR(-EINVAL);
}
return converter->base_obj_to_res(base);
}
/** /**
* Helper function that looks either a surface or bo. * Helper function that looks either a surface or bo.
* *
...@@ -422,10 +425,10 @@ void vmw_resource_unreserve(struct vmw_resource *res, ...@@ -422,10 +425,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (!res->func->may_evict || res->id == -1 || res->pin_count) if (!res->func->may_evict || res->id == -1 || res->pin_count)
return; return;
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
list_add_tail(&res->lru_head, list_add_tail(&res->lru_head,
&res->dev_priv->res_lru[res->func->res_type]); &res->dev_priv->res_lru[res->func->res_type]);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
} }
/** /**
...@@ -504,9 +507,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, ...@@ -504,9 +507,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
struct vmw_private *dev_priv = res->dev_priv; struct vmw_private *dev_priv = res->dev_priv;
int ret; int ret;
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head); list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
if (res->func->needs_backup && res->backup == NULL && if (res->func->needs_backup && res->backup == NULL &&
!no_backup) { !no_backup) {
...@@ -587,15 +590,18 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket, ...@@ -587,15 +590,18 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
/** /**
* vmw_resource_validate - Make a resource up-to-date and visible * vmw_resource_validate - Make a resource up-to-date and visible
* to the device. * to the device.
*
* @res: The resource to make visible to the device. * @res: The resource to make visible to the device.
* @intr: Perform waits interruptibly if possible.
* *
* On successful return, any backup DMA buffer pointed to by @res->backup will * On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated. * be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict * On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds. * resources of the same type until the validation succeeds.
*
* Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
* on failure.
*/ */
int vmw_resource_validate(struct vmw_resource *res) int vmw_resource_validate(struct vmw_resource *res, bool intr)
{ {
int ret; int ret;
struct vmw_resource *evict_res; struct vmw_resource *evict_res;
...@@ -616,12 +622,12 @@ int vmw_resource_validate(struct vmw_resource *res) ...@@ -616,12 +622,12 @@ int vmw_resource_validate(struct vmw_resource *res)
if (likely(ret != -EBUSY)) if (likely(ret != -EBUSY))
break; break;
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) { if (list_empty(lru_list) || !res->func->may_evict) {
DRM_ERROR("Out of device device resources " DRM_ERROR("Out of device device resources "
"for %s.\n", res->func->type_name); "for %s.\n", res->func->type_name);
ret = -EBUSY; ret = -EBUSY;
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
break; break;
} }
...@@ -630,14 +636,14 @@ int vmw_resource_validate(struct vmw_resource *res) ...@@ -630,14 +636,14 @@ int vmw_resource_validate(struct vmw_resource *res)
lru_head)); lru_head));
list_del_init(&evict_res->lru_head); list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
/* Trylock backup buffers with a NULL ticket. */ /* Trylock backup buffers with a NULL ticket. */
ret = vmw_resource_do_evict(NULL, evict_res, true); ret = vmw_resource_do_evict(NULL, evict_res, intr);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list); list_add_tail(&evict_res->lru_head, lru_list);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
if (ret == -ERESTARTSYS || if (ret == -ERESTARTSYS ||
++err_count > VMW_RES_EVICT_ERR_COUNT) { ++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res); vmw_resource_unreference(&evict_res);
...@@ -819,7 +825,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, ...@@ -819,7 +825,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
do { do {
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list)) if (list_empty(lru_list))
goto out_unlock; goto out_unlock;
...@@ -828,14 +834,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, ...@@ -828,14 +834,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_first_entry(lru_list, struct vmw_resource, list_first_entry(lru_list, struct vmw_resource,
lru_head)); lru_head));
list_del_init(&evict_res->lru_head); list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
/* Wait lock backup buffers with a ticket. */ /* Wait lock backup buffers with a ticket. */
ret = vmw_resource_do_evict(&ticket, evict_res, false); ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock); spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list); list_add_tail(&evict_res->lru_head, lru_list);
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
if (++err_count > VMW_RES_EVICT_ERR_COUNT) { if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res); vmw_resource_unreference(&evict_res);
return; return;
...@@ -846,7 +852,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, ...@@ -846,7 +852,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
} while (1); } while (1);
out_unlock: out_unlock:
write_unlock(&dev_priv->resource_lock); spin_unlock(&dev_priv->resource_lock);
} }
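Both eviction loops above (resource validation and per-type eviction) retry on -EBUSY, evicting the least-recently-used resource each round and giving up after too many consecutive eviction failures. A self-contained sketch of that bounded-retry shape, with illustrative thresholds and helpers:

#include <stdio.h>
#include <errno.h>

#define EVICT_ERR_LIMIT 10	/* illustrative, not the driver's constant */

static int try_operation(int *free_slots)
{
	if (*free_slots == 0)
		return -EBUSY;
	(*free_slots)--;
	return 0;
}

static int evict_one(int *lru_entries, int *free_slots)
{
	if (*lru_entries == 0)
		return -EBUSY;		/* nothing left to evict */
	(*lru_entries)--;
	(*free_slots)++;
	return 0;
}

static int validate_with_eviction(int free_slots, int lru_entries)
{
	int err_count = 0;

	for (;;) {
		int ret = try_operation(&free_slots);

		if (ret != -EBUSY)
			return ret;
		if (evict_one(&lru_entries, &free_slots) &&
		    ++err_count > EVICT_ERR_LIMIT)
			return -EBUSY;	/* give up after repeated failures */
	}
}

int main(void)
{
	printf("with eviction room: %d\n", validate_with_eviction(0, 1));
	printf("nothing to evict  : %d\n", validate_with_eviction(0, 0));
	return 0;
}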
/** /**
...@@ -914,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) ...@@ -914,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
/* Do we really need to pin the MOB as well? */ /* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true); vmw_bo_pin_reserved(vbo, true);
} }
ret = vmw_resource_validate(res); ret = vmw_resource_validate(res, interruptible);
if (vbo) if (vbo)
ttm_bo_unreserve(&vbo->base); ttm_bo_unreserve(&vbo->base);
if (ret) if (ret)
......
...@@ -30,6 +30,11 @@ ...@@ -30,6 +30,11 @@
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
/*
* Extra memory required by the resource id's ida storage, which is allocated
* separately from the base object itself. We estimate an on-average 128 bytes
* per ida.
*/
#define VMW_IDA_ACC_SIZE 128 #define VMW_IDA_ACC_SIZE 128
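The comment above documents the 128-byte per-object estimate; elsewhere in this series, user-visible objects additionally account TTM_OBJ_EXTRA_SIZE, and sizes are rounded with ttm_round_pot() before being charged. A tiny, hedged model of that accounting arithmetic; round_pot() here is a simplification of the TTM helper, which also page-aligns large sizes:

#include <stdio.h>
#include <stddef.h>

#define IDA_ACC_SIZE	128	/* on-average estimate, as in the comment above */

/* Round a small size up to the next power of two. */
static size_t round_pot(size_t size)
{
	size_t pot = 1;

	while (pot < size)
		pot <<= 1;
	return pot;
}

int main(void)
{
	size_t obj = 200;	/* hypothetical struct size */

	printf("accounted size: %zu bytes\n", round_pot(obj) + IDA_ACC_SIZE);
	return 0;
}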
enum vmw_cmdbuf_res_state { enum vmw_cmdbuf_res_state {
...@@ -120,8 +125,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, ...@@ -120,8 +125,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
bool delay_id, bool delay_id,
void (*res_free) (struct vmw_resource *res), void (*res_free) (struct vmw_resource *res),
const struct vmw_res_func *func); const struct vmw_res_func *func);
void vmw_resource_activate(struct vmw_resource *res,
void (*hw_destroy) (struct vmw_resource *));
int int
vmw_simple_resource_create_ioctl(struct drm_device *dev, vmw_simple_resource_create_ioctl(struct drm_device *dev,
void *data, void *data,
......
...@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ...@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs = struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base); container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty; struct vmw_kms_sou_surface_dirty sdirty;
struct vmw_validation_ctx ctx; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret; int ret;
if (!srf) if (!srf)
srf = &vfbs->surface->res; srf = &vfbs->surface->res;
ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
if (ret) if (ret)
return ret; return ret;
ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
if (ret)
goto out_unref;
sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit; sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
sdirty.base.clip = vmw_sou_surface_clip; sdirty.base.clip = vmw_sou_surface_clip;
sdirty.base.dev_priv = dev_priv; sdirty.base.dev_priv = dev_priv;
...@@ -972,8 +976,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ...@@ -972,8 +976,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc, dest_x, dest_y, num_clips, inc,
&sdirty.base); &sdirty.base);
vmw_kms_helper_resource_finish(&ctx, out_fence); vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
NULL);
return ret;
out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret; return ret;
} }
...@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv, ...@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
container_of(framebuffer, struct vmw_framebuffer_bo, container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer; base)->buffer;
struct vmw_kms_dirty dirty; struct vmw_kms_dirty dirty;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret; int ret;
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible, ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
false, false);
if (ret) if (ret)
return ret; return ret;
ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
if (ret)
goto out_unref;
ret = do_bo_define_gmrfb(dev_priv, framebuffer); ret = do_bo_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_revert; goto out_revert;
...@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv, ...@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
num_clips; num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty); 0, 0, num_clips, increment, &dirty);
vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL); vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
NULL);
return ret; return ret;
out_revert: out_revert:
vmw_kms_helper_buffer_revert(buf); vmw_validation_revert(&val_ctx);
out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret; return ret;
} }
...@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, ...@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf = struct vmw_buffer_object *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty; struct vmw_kms_dirty dirty;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret; int ret;
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false, ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
false);
if (ret) if (ret)
return ret; return ret;
ret = vmw_validation_prepare(&val_ctx, NULL, true);
if (ret)
goto out_unref;
ret = do_bo_define_gmrfb(dev_priv, vfb); ret = do_bo_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_revert; goto out_revert;
...@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, ...@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
num_clips; num_clips;
ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips, ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
0, 0, num_clips, 1, &dirty); 0, 0, num_clips, 1, &dirty);
vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL, vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
user_fence_rep); user_fence_rep);
return ret; return ret;
out_revert: out_revert:
vmw_kms_helper_buffer_revert(buf); vmw_validation_revert(&val_ctx);
out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret; return ret;
} }
...@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, ...@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
shader->num_input_sig = num_input_sig; shader->num_input_sig = num_input_sig;
shader->num_output_sig = num_output_sig; shader->num_output_sig = num_output_sig;
vmw_resource_activate(res, vmw_hw_shader_destroy); res->hw_destroy = vmw_hw_shader_destroy;
return 0; return 0;
} }
...@@ -562,7 +562,7 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, ...@@ -562,7 +562,7 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
{ {
struct vmw_dx_shader *entry, *next; struct vmw_dx_shader *entry, *next;
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head) { list_for_each_entry_safe(entry, next, list, cotable_head) {
WARN_ON(vmw_dx_shader_scrub(&entry->res)); WARN_ON(vmw_dx_shader_scrub(&entry->res));
...@@ -636,7 +636,8 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, ...@@ -636,7 +636,8 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
res = &shader->res; res = &shader->res;
shader->ctx = ctx; shader->ctx = ctx;
shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER); shader->cotable = vmw_resource_reference
(vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
shader->id = user_key; shader->id = user_key;
shader->committed = false; shader->committed = false;
INIT_LIST_HEAD(&shader->cotable_head); INIT_LIST_HEAD(&shader->cotable_head);
...@@ -656,7 +657,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, ...@@ -656,7 +657,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init; goto out_resource_init;
res->id = shader->id; res->id = shader->id;
vmw_resource_activate(res, vmw_hw_shader_destroy); res->hw_destroy = vmw_hw_shader_destroy;
out_resource_init: out_resource_init:
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
...@@ -740,13 +741,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, ...@@ -740,13 +741,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
}; };
int ret; int ret;
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number_of shaders anyway.
*/
if (unlikely(vmw_user_shader_size == 0)) if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size = vmw_user_shader_size =
ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; ttm_round_pot(sizeof(struct vmw_user_shader)) +
VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size, vmw_user_shader_size,
...@@ -792,7 +790,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, ...@@ -792,7 +790,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
} }
if (handle) if (handle)
*handle = ushader->base.hash.key; *handle = ushader->base.handle;
out_err: out_err:
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
out: out:
...@@ -814,13 +812,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, ...@@ -814,13 +812,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
}; };
int ret; int ret;
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number_of shaders anyway.
*/
if (unlikely(vmw_shader_size == 0)) if (unlikely(vmw_shader_size == 0))
vmw_shader_size = vmw_shader_size =
ttm_round_pot(sizeof(struct vmw_shader)) + 128; ttm_round_pot(sizeof(struct vmw_shader)) +
VMW_IDA_ACC_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size, vmw_shader_size,
......
...@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv, ...@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
return ret; return ret;
} }
vmw_resource_activate(&simple->res, simple->func->hw_destroy); simple->res.hw_destroy = simple->func->hw_destroy;
return 0; return 0;
} }
...@@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, ...@@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
alloc_size = offsetof(struct vmw_user_simple_resource, simple) + alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size; func->size;
account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE; account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (ret) if (ret)
...@@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, ...@@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
goto out_err; goto out_err;
} }
func->set_arg_handle(data, usimple->base.hash.key); func->set_arg_handle(data, usimple->base.handle);
out_err: out_err:
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
out_ret: out_ret:
......
...@@ -208,7 +208,7 @@ static int vmw_view_destroy(struct vmw_resource *res) ...@@ -208,7 +208,7 @@ static int vmw_view_destroy(struct vmw_resource *res)
union vmw_view_destroy body; union vmw_view_destroy body;
} *cmd; } *cmd;
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); lockdep_assert_held_once(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head); vmw_binding_res_list_scrub(&res->binding_head);
if (!view->committed || res->id == -1) if (!view->committed || res->id == -1)
...@@ -366,7 +366,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, ...@@ -366,7 +366,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
res = &view->res; res = &view->res;
view->ctx = ctx; view->ctx = ctx;
view->srf = vmw_resource_reference(srf); view->srf = vmw_resource_reference(srf);
view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]); view->cotable = vmw_resource_reference
(vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
view->view_type = view_type; view->view_type = view_type;
view->view_id = user_key; view->view_id = user_key;
view->cmd_size = cmd_size; view->cmd_size = cmd_size;
...@@ -386,7 +387,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, ...@@ -386,7 +387,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init; goto out_resource_init;
res->id = view->view_id; res->id = view->view_id;
vmw_resource_activate(res, vmw_hw_view_destroy); res->hw_destroy = vmw_hw_view_destroy;
out_resource_init: out_resource_init:
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
...@@ -439,7 +440,7 @@ void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, ...@@ -439,7 +440,7 @@ void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
{ {
struct vmw_view *entry, *next; struct vmw_view *entry, *next;
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head) list_for_each_entry_safe(entry, next, list, cotable_head)
WARN_ON(vmw_view_destroy(&entry->res)); WARN_ON(vmw_view_destroy(&entry->res));
...@@ -459,7 +460,7 @@ void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, ...@@ -459,7 +460,7 @@ void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
{ {
struct vmw_view *entry, *next; struct vmw_view *entry, *next;
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, srf_head) list_for_each_entry_safe(entry, next, list, srf_head)
WARN_ON(vmw_view_destroy(&entry->res)); WARN_ON(vmw_view_destroy(&entry->res));
......
...@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, ...@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct vmw_stdu_dirty ddirty; struct vmw_stdu_dirty ddirty;
int ret; int ret;
bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D); bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
/* /*
* VMs without 3D support don't have the surface DMA command and * VMs without 3D support don't have the surface DMA command and
* we'll be using a CPU blit, and the framebuffer should be moved out * we'll be using a CPU blit, and the framebuffer should be moved out
* of VRAM. * of VRAM.
*/ */
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible, ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
false, cpu_blit);
if (ret) if (ret)
return ret; return ret;
ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
if (ret)
goto out_unref;
ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM : ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
SVGA3D_READ_HOST_VRAM; SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX; ddirty.left = ddirty.top = S32_MAX;
...@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, ...@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips, ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
0, 0, num_clips, increment, &ddirty.base); 0, 0, num_clips, increment, &ddirty.base);
vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
user_fence_rep); user_fence_rep);
return ret;
out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret; return ret;
} }
...@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, ...@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs = struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base); container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty; struct vmw_stdu_dirty sdirty;
struct vmw_validation_ctx ctx; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret; int ret;
if (!srf) if (!srf)
srf = &vfbs->surface->res; srf = &vfbs->surface->res;
ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
if (ret) if (ret)
return ret; return ret;
ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
if (ret)
goto out_unref;
if (vfbs->is_bo_proxy) { if (vfbs->is_bo_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc); ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret) if (ret)
...@@ -954,8 +966,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, ...@@ -954,8 +966,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
dest_x, dest_y, num_clips, inc, dest_x, dest_y, num_clips, inc,
&sdirty.base); &sdirty.base);
out_finish: out_finish:
vmw_kms_helper_resource_finish(&ctx, out_fence); vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
NULL);
return ret;
out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret; return ret;
} }
......
...@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv, ...@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
*/ */
INIT_LIST_HEAD(&srf->view_list); INIT_LIST_HEAD(&srf->view_list);
vmw_resource_activate(res, vmw_hw_surface_destroy); res->hw_destroy = vmw_hw_surface_destroy;
return ret; return ret;
} }
...@@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(vmw_user_surface_size == 0)) if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128; VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
num_sizes = 0; num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
...@@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
num_sizes == 0) num_sizes == 0)
return -EINVAL; return -EINVAL;
size = vmw_user_surface_size + 128 + size = vmw_user_surface_size +
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
...@@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
rep->sid = user_srf->prime.base.hash.key; rep->sid = user_srf->prime.base.handle;
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
ttm_read_unlock(&dev_priv->reservation_sem); ttm_read_unlock(&dev_priv->reservation_sem);
...@@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n", DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes); user_sizes, srf->num_sizes);
ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE); ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT; ret = -EFAULT;
} }
...@@ -1613,9 +1613,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1613,9 +1613,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (unlikely(vmw_user_surface_size == 0)) if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128; VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
size = vmw_user_surface_size + 128; size = vmw_user_surface_size;
/* Define a surface based on the parameters. */ /* Define a surface based on the parameters. */
ret = vmw_surface_gb_priv_define(dev, ret = vmw_surface_gb_priv_define(dev,
...@@ -1687,7 +1687,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1687,7 +1687,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
goto out_unlock; goto out_unlock;
} }
rep->handle = user_srf->prime.base.hash.key; rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->backup_size; rep->backup_size = res->backup_size;
if (res->backup) { if (res->backup) {
rep->buffer_map_handle = rep->buffer_map_handle =
...@@ -1749,7 +1749,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, ...@@ -1749,7 +1749,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface " DRM_ERROR("Could not add a reference to a GB surface "
"backup buffer.\n"); "backup buffer.\n");
(void) ttm_ref_object_base_unref(tfile, base->hash.key, (void) ttm_ref_object_base_unref(tfile, base->handle,
TTM_REF_USAGE); TTM_REF_USAGE);
goto out_bad_resource; goto out_bad_resource;
} }
...@@ -1763,7 +1763,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, ...@@ -1763,7 +1763,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.base.array_size = srf->array_size; rep->creq.base.array_size = srf->array_size;
rep->creq.base.buffer_handle = backup_handle; rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = srf->base_size; rep->creq.base.base_size = srf->base_size;
rep->crep.handle = user_srf->prime.base.hash.key; rep->crep.handle = user_srf->prime.base.handle;
rep->crep.backup_size = srf->res.backup_size; rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle; rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle = rep->crep.buffer_map_handle =
......
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
* @hash: A hash entry used for the duplicate detection hash table.
* @as_mob: Validate as mob.
* @cpu_blit: Validate for cpu blit access.
*
* Bit fields are used since these structures are allocated and freed in
* large numbers and space conservation is desired.
*/
struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
u32 as_mob : 1;
u32 cpu_blit : 1;
};
/**
* struct vmw_validation_res_node - Resource validation metadata.
* @head: List head for the resource validation list.
* @hash: A hash entry used for the duplicate detection hash table.
* @res: Reference counted resource pointer.
* @new_backup: Non ref-counted pointer to new backup buffer to be assigned
* to a resource.
* @new_backup_offset: Offset into the new backup mob for resources that can
* share MOBs.
* @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
* the command stream provides a mob bind operation.
* @switching_backup: The validation process is switching backup MOB.
* @first_usage: True iff the resource has been seen only once in the current
* validation batch.
* @reserved: Whether the resource is currently reserved by this process.
* @private: Optionally additional memory for caller-private data.
*
* Bit fields are used since these structures are allocated and freed in
* large numbers and space conservation is desired.
*/
struct vmw_validation_res_node {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_buffer_object *new_backup;
unsigned long new_backup_offset;
u32 no_buffer_needed : 1;
u32 switching_backup : 1;
u32 first_usage : 1;
u32 reserved : 1;
unsigned long private[0];
};
/**
* vmw_validation_mem_alloc - Allocate kernel memory from the validation
* context based allocator
* @ctx: The validation context
* @size: The number of bytes to allocate.
*
* The memory allocated may not exceed PAGE_SIZE, and the returned
* address is aligned to sizeof(long). All memory allocated this way is
* reclaimed after validation when calling any of the exported functions:
* vmw_validation_unref_lists()
* vmw_validation_revert()
* vmw_validation_done()
*
* Return: Pointer to the allocated memory on success. NULL on failure.
*/
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
unsigned int size)
{
void *addr;
size = vmw_validation_align(size);
if (size > PAGE_SIZE)
return NULL;
if (ctx->mem_size_left < size) {
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
}
addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
ctx->mem_size_left -= size;
return addr;
}
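Usage sketch (not part of the commit; the struct and function names are hypothetical): callers can carve small per-validation allocations out of this bump allocator and let the memory be reclaimed together with the context.

struct example_scratch {                                /* hypothetical caller data */
        u32 num_dirty;
};

static int example_alloc_scratch(struct vmw_validation_context *ctx)
{
        struct example_scratch *scratch =
                vmw_validation_mem_alloc(ctx, sizeof(*scratch));

        if (!scratch)
                return -ENOMEM;  /* allocator page exhausted or size > PAGE_SIZE */

        /*
         * Backing pages are allocated with GFP_KERNEL | __GFP_ZERO and are only
         * released by vmw_validation_mem_free() via the context teardown paths,
         * so no explicit free is needed here.
         */
        scratch->num_dirty = 0;
        return 0;
}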
/**
* vmw_validation_mem_free - Free all memory allocated using
* vmw_validation_mem_alloc()
* @ctx: The validation context
*
* All memory previously allocated for this context using
* vmw_validation_mem_alloc() is freed.
*/
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
struct page *entry, *next;
list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
list_del_init(&entry->lru);
__free_page(entry);
}
ctx->mem_size_left = 0;
}
/**
* vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
* validation context's lists.
* @ctx: The validation context to search.
* @vbo: The buffer object to search for.
*
* Return: Pointer to the struct vmw_validation_bo_node referencing the
* duplicate, or NULL if none found.
*/
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
struct vmw_buffer_object *vbo)
{
struct vmw_validation_bo_node *bo_node = NULL;
if (!ctx->merge_dups)
return NULL;
if (ctx->ht) {
struct drm_hash_item *hash;
if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
bo_node = container_of(hash, typeof(*bo_node), hash);
} else {
struct vmw_validation_bo_node *entry;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
if (entry->base.bo == &vbo->base) {
bo_node = entry;
break;
}
}
}
return bo_node;
}
/**
* vmw_validation_find_res_dup - Find a duplicate resource entry in the
* validation context's lists.
* @ctx: The validation context to search.
* @res: The resource to search for.
*
* Return: Pointer to the struct vmw_validation_res_node referencing the
* duplicate, or NULL if none found.
*/
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
struct vmw_resource *res)
{
struct vmw_validation_res_node *res_node = NULL;
if (!ctx->merge_dups)
return NULL;
if (ctx->ht) {
struct drm_hash_item *hash;
if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
res_node = container_of(hash, typeof(*res_node), hash);
} else {
struct vmw_validation_res_node *entry;
list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
if (entry->res == res) {
res_node = entry;
goto out;
}
}
list_for_each_entry(entry, &ctx->resource_list, head) {
if (entry->res == res) {
res_node = entry;
break;
}
}
}
out:
return res_node;
}
/**
* vmw_validation_add_bo - Add a buffer object to the validation context.
* @ctx: The validation context.
* @vbo: The buffer object.
* @as_mob: Validate as mob, otherwise suitable for GMR operations.
* @cpu_blit: Validate in a page-mappable location.
*
* Return: Zero on success, negative error code otherwise.
*/
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
struct vmw_buffer_object *vbo,
bool as_mob,
bool cpu_blit)
{
struct vmw_validation_bo_node *bo_node;
bo_node = vmw_validation_find_bo_dup(ctx, vbo);
if (bo_node) {
if (bo_node->as_mob != as_mob ||
bo_node->cpu_blit != cpu_blit) {
DRM_ERROR("Inconsistent buffer usage.\n");
return -EINVAL;
}
} else {
struct ttm_validate_buffer *val_buf;
int ret;
bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
if (!bo_node)
return -ENOMEM;
if (ctx->ht) {
bo_node->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a buffer "
"validation entry.\n");
return ret;
}
}
val_buf = &bo_node->base;
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
if (!val_buf->bo)
return -ESRCH;
val_buf->shared = false;
list_add_tail(&val_buf->head, &ctx->bo_list);
bo_node->as_mob = as_mob;
bo_node->cpu_blit = cpu_blit;
}
return 0;
}
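Caller-side sketch of the duplicate handling above (helper name hypothetical): with duplicate merging enabled, re-adding the same buffer with identical flags merges into the existing node, while conflicting flags would fail with -EINVAL.

static int example_add_bo_twice(struct vmw_buffer_object *vbo)
{
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 1);  /* merge duplicates, no hash table */
        int ret;

        ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
        if (!ret)
                /* Same buffer, same flags: merged with the first entry. */
                ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);

        /* Nothing is reserved yet, so unreferencing the lists is enough. */
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}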
/**
* vmw_validation_add_resource - Add a resource to the validation context.
* @ctx: The validation context.
* @res: The resource.
* @priv_size: Size of private, additional metadata.
* @p_node: Output pointer of additional metadata address.
* @first_usage: Whether this was the first time this resource was seen.
*
* Return: Zero on success, negative error code otherwise.
*/
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
void **p_node,
bool *first_usage)
{
struct vmw_validation_res_node *node;
int ret;
node = vmw_validation_find_res_dup(ctx, res);
if (node) {
node->first_usage = 0;
goto out_fill;
}
node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
if (!node) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
}
if (ctx->ht) {
node->hash.key = (unsigned long) res;
ret = drm_ht_insert_item(ctx->ht, &node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
return ret;
}
}
node->res = vmw_resource_reference_unless_doomed(res);
if (!node->res)
return -ESRCH;
node->first_usage = 1;
if (!res->dev_priv->has_mob) {
list_add_tail(&node->head, &ctx->resource_list);
} else {
switch (vmw_res_type(res)) {
case vmw_res_context:
case vmw_res_dx_context:
list_add(&node->head, &ctx->resource_ctx_list);
break;
case vmw_res_cotable:
list_add_tail(&node->head, &ctx->resource_ctx_list);
break;
default:
list_add_tail(&node->head, &ctx->resource_list);
break;
}
}
out_fill:
if (first_usage)
*first_usage = node->first_usage;
if (p_node)
*p_node = &node->private;
return 0;
}
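Sketch of how @priv_size, @p_node and @first_usage are meant to be combined (struct and helper names are hypothetical):

struct example_res_priv {                       /* hypothetical per-resource data */
        u32 num_bindings;
};

static int example_track_resource(struct vmw_validation_context *ctx,
                                  struct vmw_resource *res)
{
        struct example_res_priv *priv;
        bool first_usage;
        int ret;

        ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
                                          (void **)&priv, &first_usage);
        if (ret)
                return ret;

        /*
         * The private area lives in the context allocator; initialize it only
         * the first time this resource shows up in the validation batch.
         */
        if (first_usage)
                priv->num_bindings = 0;
        return 0;
}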
/**
* vmw_validation_res_switch_backup - Register a backup MOB switch during
* validation.
* @ctx: The validation context.
* @val_private: The additional meta-data pointer returned when the
* resource was registered with the validation context. Used to identify
* the resource.
* @vbo: The new backup buffer object MOB. This buffer object needs to have
* already been registered with the validation context.
* @backup_offset: Offset into the new backup MOB.
*/
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
void *val_private,
struct vmw_buffer_object *vbo,
unsigned long backup_offset)
{
struct vmw_validation_res_node *val;
val = container_of(val_private, typeof(*val), private);
val->switching_backup = 1;
if (val->first_usage)
val->no_buffer_needed = 1;
val->new_backup = vbo;
val->new_backup_offset = backup_offset;
}
/**
* vmw_validation_res_reserve - Reserve all resources registered with this
* validation context.
* @ctx: The validation context.
* @intr: Use interruptible waits when possible.
*
* Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
* code on failure.
*/
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
bool intr)
{
struct vmw_validation_res_node *val;
int ret = 0;
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
list_for_each_entry(val, &ctx->resource_list, head) {
struct vmw_resource *res = val->res;
ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
if (ret)
goto out_unreserve;
val->reserved = 1;
if (res->backup) {
struct vmw_buffer_object *vbo = res->backup;
ret = vmw_validation_add_bo
(ctx, vbo, vmw_resource_needs_backup(res),
false);
if (ret)
goto out_unreserve;
}
}
return 0;
out_unreserve:
vmw_validation_res_unreserve(ctx, true);
return ret;
}
/**
* vmw_validation_res_unreserve - Unreserve all reserved resources
* registered with this validation context.
* @ctx: The validation context.
* @backoff: Whether this is a backoff- or a commit-type operation. This
* is used to determine whether to switch backup MOBs or not.
*/
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
bool backoff)
{
struct vmw_validation_res_node *val;
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
list_for_each_entry(val, &ctx->resource_list, head) {
if (val->reserved)
vmw_resource_unreserve(val->res,
!backoff &&
val->switching_backup,
val->new_backup,
val->new_backup_offset);
}
}
/**
* vmw_validation_bo_validate_single - Validate a single buffer object.
* @bo: The TTM buffer object base.
* @interruptible: Whether to perform waits interruptible if possible.
* @validate_as_mob: Whether to validate in MOB memory.
*
* Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
* code on failure.
*/
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob)
{
struct vmw_buffer_object *vbo =
container_of(bo, struct vmw_buffer_object, base);
struct ttm_operation_ctx ctx = {
.interruptible = interruptible,
.no_wait_gpu = false
};
int ret;
if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
* start evicting GMRs to make room. If the DMA buffer can't be
* used as a GMR, this will return -ENOMEM.
*/
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (ret == 0 || ret == -ERESTARTSYS)
return ret;
/**
* If that failed, try VRAM again, this time evicting
* previous contents.
*/
ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
return ret;
}
/**
* vmw_validation_bo_validate - Validate all buffer objects registered with
* the validation context.
* @ctx: The validation context.
* @intr: Whether to perform waits interruptible if possible.
*
* Return: Zero on success, -ERESTARTSYS if interrupted,
* negative error code on failure.
*/
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
struct vmw_validation_bo_node *entry;
int ret;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
if (entry->cpu_blit) {
struct ttm_operation_ctx ctx = {
.interruptible = intr,
.no_wait_gpu = false
};
ret = ttm_bo_validate(entry->base.bo,
&vmw_nonfixed_placement, &ctx);
} else {
ret = vmw_validation_bo_validate_single
(entry->base.bo, intr, entry->as_mob);
}
if (ret)
return ret;
}
return 0;
}
/**
* vmw_validation_res_validate - Validate all resources registered with the
* validation context.
* @ctx: The validation context.
* @intr: Whether to perform waits interruptible if possible.
*
* Before this function is called, all resource backup buffers must have
* been validated.
*
* Return: Zero on success, -ERESTARTSYS if interrupted,
* negative error code on failure.
*/
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
struct vmw_validation_res_node *val;
int ret;
list_for_each_entry(val, &ctx->resource_list, head) {
struct vmw_resource *res = val->res;
struct vmw_buffer_object *backup = res->backup;
ret = vmw_resource_validate(res, intr);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
/* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) {
struct vmw_buffer_object *vbo = res->backup;
ret = vmw_validation_add_bo
(ctx, vbo, vmw_resource_needs_backup(res),
false);
if (ret)
return ret;
}
}
return 0;
}
/**
* vmw_validation_drop_ht - Reset the hash table used for duplicate finding
* and unregister it from this validation context.
* @ctx: The validation context.
*
* The hash table used for duplicate finding is an expensive resource and
* may be protected by mutexes that may cause deadlocks during resource
* unreferencing if held. After resource- and buffer object registering,
* there is no longer any use for this hash table, so allow freeing it
* either to shorten any mutex locking time, or before resources- and
* buffer objects are freed during validation context cleanup.
*/
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
struct vmw_validation_bo_node *entry;
struct vmw_validation_res_node *val;
if (!ctx->ht)
return;
list_for_each_entry(entry, &ctx->bo_list, base.head)
(void) drm_ht_remove_item(ctx->ht, &entry->hash);
list_for_each_entry(val, &ctx->resource_list, head)
(void) drm_ht_remove_item(ctx->ht, &val->hash);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
(void) drm_ht_remove_item(ctx->ht, &val->hash);
ctx->ht = NULL;
}
/**
* vmw_validation_unref_lists - Unregister previously registered buffer
* object and resources.
* @ctx: The validation context.
*
* Note that this function may cause buffer object- and resource destructors
* to be invoked.
*/
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
struct vmw_validation_bo_node *entry;
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head)
ttm_bo_unref(&entry->base.bo);
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
list_for_each_entry(val, &ctx->resource_list, head)
vmw_resource_unreference(&val->res);
/*
* No need to detach each list entry since they are all freed with
* vmw_validation_mem_free(). Just make them inaccessible.
*/
INIT_LIST_HEAD(&ctx->bo_list);
INIT_LIST_HEAD(&ctx->resource_list);
vmw_validation_mem_free(ctx);
}
/**
* vmw_validation_prepare - Prepare a validation context for command
* submission.
* @ctx: The validation context.
* @mutex: The mutex used to protect resource reservation.
* @intr: Whether to perform waits interruptible if possible.
*
* Note that the single reservation mutex @mutex is an unfortunate
* construct. Ideally resource reservation should be moved to per-resource
* ww_mutexes.
* If this function doesn't return zero to indicate success, all resources
* are left unreserved but still referenced.
* Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
* on error.
*/
int vmw_validation_prepare(struct vmw_validation_context *ctx,
struct mutex *mutex,
bool intr)
{
int ret = 0;
if (mutex) {
if (intr)
ret = mutex_lock_interruptible(mutex);
else
mutex_lock(mutex);
if (ret)
return -ERESTARTSYS;
}
ctx->res_mutex = mutex;
ret = vmw_validation_res_reserve(ctx, intr);
if (ret)
goto out_no_res_reserve;
ret = vmw_validation_bo_reserve(ctx, intr);
if (ret)
goto out_no_bo_reserve;
ret = vmw_validation_bo_validate(ctx, intr);
if (ret)
goto out_no_validate;
ret = vmw_validation_res_validate(ctx, intr);
if (ret)
goto out_no_validate;
return 0;
out_no_validate:
vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
if (mutex)
mutex_unlock(mutex);
return ret;
}
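End-to-end sketch of the pattern the display-unit hunks above switch to (the helper and the submission stub are hypothetical; fence creation is elided): register, prepare, submit, then commit with vmw_validation_done() or back off with vmw_validation_revert(). Before vmw_validation_prepare() has succeeded, vmw_validation_unref_lists() is the only cleanup required.

static int example_send_commands(struct vmw_private *dev_priv)
{
        return 0;       /* hypothetical stub: build and submit the command stream */
}

static int example_submit(struct vmw_private *dev_priv,
                          struct vmw_resource *res,
                          struct vmw_buffer_object *vbo,
                          struct vmw_fence_obj *fence)
{
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        ret = vmw_validation_add_resource(&val_ctx, res, 0, NULL, NULL);
        if (ret)
                goto out_unref;

        ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
        if (ret)
                goto out_unref;

        ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
        if (ret)
                goto out_unref;

        ret = example_send_commands(dev_priv);
        if (ret) {
                /* Backs off reservations, drops the mutex and the lists. */
                vmw_validation_revert(&val_ctx);
                return ret;
        }

        /* Fences the buffer objects, unreserves and drops the lists. */
        vmw_validation_done(&val_ctx, fence);
        return 0;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}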
/**
* vmw_validation_revert - Revert validation actions if command submission
* failed.
*
* @ctx: The validation context.
*
* The caller still needs to unref resources after a call to this function.
*/
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
vmw_validation_bo_backoff(ctx);
vmw_validation_res_unreserve(ctx, true);
if (ctx->res_mutex)
mutex_unlock(ctx->res_mutex);
vmw_validation_unref_lists(ctx);
}
/**
* vmw_validation_done - Commit validation actions after command submission
* success.
* @ctx: The validation context.
* @fence: Fence with which to fence all buffer objects taking part in the
* command submission.
*
* The caller does NOT need to unref resources after a call to this function.
*/
void vmw_validation_done(struct vmw_validation_context *ctx,
struct vmw_fence_obj *fence)
{
vmw_validation_bo_fence(ctx, fence);
vmw_validation_res_unreserve(ctx, false);
if (ctx->res_mutex)
mutex_unlock(ctx->res_mutex);
vmw_validation_unref_lists(ctx);
}
/**
* vmw_validation_preload_bo - Preload the validation memory allocator for a
* call to vmw_validation_add_bo().
* @ctx: Pointer to the validation context.
*
* Iff this function returns successfully, the next call to
* vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
* but voids the guarantee.
*
* Returns: Zero if successful, %-ENOMEM otherwise.
*/
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
unsigned int size = sizeof(struct vmw_validation_bo_node);
if (!vmw_validation_mem_alloc(ctx, size))
return -ENOMEM;
ctx->mem_size_left += size;
return 0;
}
/**
* vmw_validation_preload_res - Preload the validation memory allocator for a
* call to vmw_validation_add_resource().
* @ctx: Pointer to the validation context.
* @size: Size of the validation node extra data. See below.
*
* Iff this function returns successfully, the next call to
* vmw_validation_add_resource() with the same or smaller @size is guaranteed not to
* sleep. An error is not fatal but voids the guarantee.
*
* Returns: Zero if successful, %-ENOMEM otherwise.
*/
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size)
{
size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
size) +
vmw_validation_align(sizeof(struct vmw_validation_bo_node));
if (!vmw_validation_mem_alloc(ctx, size))
return -ENOMEM;
ctx->mem_size_left += size;
return 0;
}
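Sketch of the intended preload pattern (the lock parameter is hypothetical): do the possibly sleeping allocator work up front so that the subsequent vmw_validation_add_bo() under a spinlock is guaranteed not to sleep, as documented above.

static int example_add_under_lock(struct vmw_validation_context *ctx,
                                  spinlock_t *lock,     /* hypothetical lock */
                                  struct vmw_buffer_object *vbo)
{
        int ret;

        ret = vmw_validation_preload_bo(ctx);   /* may sleep and allocate a page */
        if (ret)
                return ret;

        spin_lock(lock);
        /* Guaranteed not to sleep thanks to the preload above. */
        ret = vmw_validation_add_bo(ctx, vbo, false, false);
        spin_unlock(lock);

        return ret;
}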
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _VMWGFX_VALIDATION_H_
#define _VMWGFX_VALIDATION_H_
#include <drm/drm_hashtab.h>
#include <linux/list.h>
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>
/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
* @resource_ctx_list: List head for resource validation metadata for
* resources that need to be validated before those in @resource_list
* @bo_list: List head for buffer objects
* @page_list: List of pages used by the memory allocator
* @ticket: Ticket used for ww mutex locking
* @res_mutex: Pointer to mutex used for resource reserving
* @merge_dups: Whether to merge metadata for duplicate resources or
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
struct list_head resource_list;
struct list_head resource_ctx_list;
struct list_head bo_list;
struct list_head page_list;
struct ww_acquire_ctx ticket;
struct mutex *res_mutex;
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
};
struct vmw_buffer_object;
struct vmw_resource;
struct vmw_fence_obj;
#if 0
/**
* DECLARE_VAL_CONTEXT - Declare a validation context with initialization
* @_name: The name of the variable
* @_ht: The hash table used to find dups or NULL if none
* @_merge_dups: Whether to merge duplicate buffer object- or resource
* entries. If set to true, ideally a hash table pointer should be supplied
* as well unless the number of resources and buffer objects per validation
* is known to be very small
*/
#endif
#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups) \
struct vmw_validation_context _name = \
{ .ht = _ht, \
.resource_list = LIST_HEAD_INIT((_name).resource_list), \
.resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
.bo_list = LIST_HEAD_INIT((_name).bo_list), \
.page_list = LIST_HEAD_INIT((_name).page_list), \
.res_mutex = NULL, \
.merge_dups = _merge_dups, \
.mem_size_left = 0, \
}
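Declaration sketch: the first form matches the call sites in the display-unit hunks above; the second, with duplicate merging and a caller-provided hash table, is an assumption about typical use rather than something taken from this commit.

static void example_declare_contexts(struct drm_open_hash *dup_ht)
{
        /* No duplicate merging, no hash table -- as in the KMS paths above. */
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

        /* Duplicate merging enabled; @dup_ht was set up with drm_ht_create(). */
        DECLARE_VAL_CONTEXT(val_ctx_merged, dup_ht, 1);

        /* ... register objects, call vmw_validation_prepare(), and so on ... */
        (void)val_ctx;
        (void)val_ctx_merged;
}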
/**
* vmw_validation_has_bos - return whether the validation context has
* any buffer objects registered.
*
* @ctx: The validation context
* Returns: Whether any buffer objects are registered
*/
static inline bool
vmw_validation_has_bos(struct vmw_validation_context *ctx)
{
return !list_empty(&ctx->bo_list);
}
/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
*
* This function is intended to be used if the hash table wasn't
* available at validation context declaration time.
*/
static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
struct drm_open_hash *ht)
{
ctx->ht = ht;
}
/**
* vmw_validation_bo_reserve - Reserve buffer objects registered with a
* validation context
* @ctx: The validation context
* @intr: Perform waits interruptible
*
* Return: Zero on success, -ERESTARTSYS when interrupted, negative error
* code on failure
*/
static inline int
vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
NULL);
}
/**
* vmw_validation_bo_backoff - Unreserve buffer objects registered with a
* validation context
* @ctx: The validation context
*
* This function unreserves the buffer objects previously reserved using
* vmw_validation_bo_reserve. It's typically used as part of an error path
*/
static inline void
vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}
/**
* vmw_validation_bo_fence - Unreserve and fence buffer objects registered
* with a validation context
* @ctx: The validation context
*
* This function unreserves the buffer objects previously reserved using
* vmw_validation_bo_reserve, and fences them with a fence object.
*/
static inline void
vmw_validation_bo_fence(struct vmw_validation_context *ctx,
struct vmw_fence_obj *fence)
{
ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
(void *) fence);
}
/**
* vmw_validation_context_init - Initialize a validation context
* @ctx: Pointer to the validation context to initialize
*
* This function initializes a validation context with @merge_dups set
* to false
*/
static inline void
vmw_validation_context_init(struct vmw_validation_context *ctx)
{
memset(ctx, 0, sizeof(*ctx));
INIT_LIST_HEAD(&ctx->resource_list);
INIT_LIST_HEAD(&ctx->resource_ctx_list);
INIT_LIST_HEAD(&ctx->bo_list);
}
/**
* vmw_validation_align - Align a validation memory allocation
* @val: The size to be aligned
*
* Returns: @val aligned to the granularity used by the validation memory
* allocator.
*/
static inline unsigned int vmw_validation_align(unsigned int val)
{
return ALIGN(val, sizeof(long));
}
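Worked example, assuming a 64-bit build where sizeof(long) == 8:

/*
 * vmw_validation_align(5)  == 8
 * vmw_validation_align(16) == 16
 * vmw_validation_align(17) == 24
 */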
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
struct vmw_buffer_object *vbo,
bool as_mob, bool cpu_blit);
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob);
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
void **p_node,
bool *first_usage);
void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
bool intr);
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
bool backoff);
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
void *val_private,
struct vmw_buffer_object *vbo,
unsigned long backup_offset);
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
int vmw_validation_prepare(struct vmw_validation_context *ctx,
struct mutex *mutex, bool intr);
void vmw_validation_revert(struct vmw_validation_context *ctx);
void vmw_validation_done(struct vmw_validation_context *ctx,
struct vmw_fence_obj *fence);
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
unsigned int size);
int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size);
#endif
...@@ -312,6 +312,24 @@ ttm_bo_reference(struct ttm_buffer_object *bo) ...@@ -312,6 +312,24 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
return bo; return bo;
} }
/**
* ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
* its refcount has already reached zero.
* @bo: The buffer object.
*
* Used to reference a TTM buffer object in lookups where the object is removed
* from the lookup structure during the destructor and for RCU lookups.
*
* Returns: @bo if the referencing was successful, NULL otherwise.
*/
static inline __must_check struct ttm_buffer_object *
ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
{
if (!kref_get_unless_zero(&bo->kref))
return NULL;
return bo;
}
/** /**
* ttm_bo_wait - wait for buffer idle. * ttm_bo_wait - wait for buffer idle.
* *
......
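A hedged sketch of the lookup pattern ttm_bo_get_unless_zero() is meant for (the lookup structure below is a hypothetical toy, not from this commit): find the object under the lock that also serializes its removal, then try to take a reference; a NULL result simply means the destructor is already running.

struct example_bo_table {                       /* hypothetical lookup structure */
        spinlock_t lock;                        /* also taken by the destructor */
        struct ttm_buffer_object *single;       /* toy table holding one object */
};

static struct ttm_buffer_object *
example_bo_lookup(struct example_bo_table *tbl)
{
        struct ttm_buffer_object *bo;

        spin_lock(&tbl->lock);
        bo = tbl->single;
        if (bo)
                bo = ttm_bo_get_unless_zero(bo); /* NULL if refcount hit zero */
        spin_unlock(&tbl->lock);

        return bo;
}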