Commit 05efb1ab authored by Thomas Hellstrom

drm/ttm: ttm object security fixes for render nodes

When a client looks up a ttm object, don't look it up in the device hash
table, but in the file hash table. That makes sure the client has indeed
put a reference on the object or, in gem terms, has opened the object,
either using prime or using the global "name".

To avoid a performance loss, make sure the file hash table entries can be
looked up under an RCU lock, and as a consequence replace the rwlock with a
spinlock, since we no longer need to take it in read mode.
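A minimal sketch of that pattern, condensed from the hunks below (the ttm_*/drm_ht_* identifiers are the driver's own; the sketch_* helper names and the stripped-down bodies are illustrative only, not the exact driver code): readers walk the hash table under rcu_read_lock() and only succeed if kref_get_unless_zero() still finds a live refcount, while writers serialize insertion and removal with the per-file spinlock and defer the actual free past a grace period with kfree_rcu().

/*
 * Sketch of the RCU-lookup / spinlock-update scheme described above.
 * Condensed from the patch below; not standalone driver code.
 */
static struct ttm_base_object *
sketch_lookup_usage_ref(struct ttm_object_file *tfile, uint32_t key)
{
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;

	rcu_read_lock();			/* reader side: no lock write */
	if (drm_ht_find_item_rcu(ht, key, &hash) == 0) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		/* The entry may be concurrently released; only take a
		 * reference if the refcount has not already hit zero. */
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}

static void sketch_release_ref(struct ttm_object_file *tfile,
			       struct ttm_ref_object *ref)
{
	spin_lock(&tfile->lock);		/* writer side: plain spinlock */
	(void) drm_ht_remove_item_rcu(&tfile->ref_hash[ref->ref_type],
				      &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	kfree_rcu(ref, rcu_head);		/* free after an RCU grace period */
}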

Finally, add a ttm object lookup function for the device hash table, intended
to be used when we put a ref object on a base object or, in gem terms, when
we open the object.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
parent 859ae233
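
As a rough illustration of the calling convention this change establishes (a hypothetical helper pair, not part of the patch; error handling trimmed, example_* names are placeholders): a handle is first resolved through the device-wide hash table and turned into a per-file ref object, and every later lookup by that client goes through its own file hash table.

/*
 * Hypothetical callers illustrating the two lookup paths touched by this
 * patch. The ttm_* calls are the ones added or kept by the patch; the
 * example_* names are placeholders.
 */
static int example_open_handle(struct ttm_object_device *tdev,
			       struct ttm_object_file *tfile,
			       uint32_t handle)
{
	struct ttm_base_object *base;
	int ret;

	/*
	 * Opening/referencing: the client does not hold a ref yet, so the
	 * device hash table is consulted. The shareable check now happens
	 * in ttm_ref_object_add() rather than in the lookup itself.
	 */
	base = ttm_base_object_lookup_for_ref(tdev, handle);
	if (base == NULL)
		return -EINVAL;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	ttm_base_object_unref(&base);
	return ret;
}

static struct ttm_base_object *
example_lookup_handle(struct ttm_object_file *tfile, uint32_t handle)
{
	/*
	 * All later lookups resolve through the client's own ref hash
	 * table, so only objects this client has opened are visible.
	 */
	return ttm_base_object_lookup(tfile, handle);
}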
@@ -68,7 +68,7 @@
 struct ttm_object_file {
 	struct ttm_object_device *tdev;
-	rwlock_t lock;
+	spinlock_t lock;
 	struct list_head ref_list;
 	struct drm_open_hash ref_hash[TTM_REF_NUM];
 	struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
  */

 struct ttm_ref_object {
+	struct rcu_head rcu_head;
 	struct drm_hash_item hash;
 	struct list_head head;
 	struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
 	 * call_rcu() or ttm_base_object_kfree().
 	 */

-	if (base->refcount_release) {
-		ttm_object_file_unref(&base->tfile);
+	ttm_object_file_unref(&base->tfile);
+	if (base->refcount_release)
 		base->refcount_release(&base);
-	}
 }

 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 					       uint32_t key)
 {
-	struct ttm_object_device *tdev = tfile->tdev;
-	struct ttm_base_object *uninitialized_var(base);
+	struct ttm_base_object *base = NULL;
 	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
 	int ret;

 	rcu_read_lock();
-	ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+	ret = drm_ht_find_item_rcu(ht, key, &hash);

 	if (likely(ret == 0)) {
-		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
 	}
 	rcu_read_unlock();

-	if (unlikely(ret != 0))
-		return NULL;
+	return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);

-	if (tfile != base->tfile && !base->shareable) {
-		pr_err("Attempted access of non-shareable object\n");
-		ttm_base_object_unref(&base);
-		return NULL;
-	}
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+	struct ttm_base_object *base = NULL;
+	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tdev->object_hash;
+	int ret;
+
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+	if (likely(ret == 0)) {
+		base = drm_hash_entry(hash, struct ttm_base_object, hash);
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
+	}
+	rcu_read_unlock();

 	return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);

 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 	int ret = -EINVAL;

+	if (base->tfile != tfile && !base->shareable)
+		return -EPERM;
+
 	if (existed != NULL)
 		*existed = true;

 	while (ret == -EINVAL) {
-		read_lock(&tfile->lock);
-		ret = drm_ht_find_item(ht, base->hash.key, &hash);
+		rcu_read_lock();
+		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);

 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-			kref_get(&ref->kref);
-			read_unlock(&tfile->lock);
-			break;
+			if (kref_get_unless_zero(&ref->kref)) {
+				rcu_read_unlock();
+				break;
+			}
 		}

-		read_unlock(&tfile->lock);
+		rcu_read_unlock();
 		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
 					   false, false);
 		if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		ref->ref_type = ref_type;
 		kref_init(&ref->kref);

-		write_lock(&tfile->lock);
-		ret = drm_ht_insert_item(ht, &ref->hash);
+		spin_lock(&tfile->lock);
+		ret = drm_ht_insert_item_rcu(ht, &ref->hash);

 		if (likely(ret == 0)) {
 			list_add_tail(&ref->head, &tfile->ref_list);
 			kref_get(&base->refcount);
-			write_unlock(&tfile->lock);
+			spin_unlock(&tfile->lock);
 			if (existed != NULL)
 				*existed = false;
 			break;
 		}

-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		BUG_ON(ret != -EINVAL);

 		ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
 	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

 	ht = &tfile->ref_hash[ref->ref_type];
-	(void)drm_ht_remove_item(ht, &ref->hash);
+	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
 	list_del(&ref->head);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);

 	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
 		base->ref_obj_release(base, ref->ref_type);

 	ttm_base_object_unref(&ref->obj);
 	ttm_mem_global_free(mem_glob, sizeof(*ref));
-	kfree(ref);
-	write_lock(&tfile->lock);
+	kfree_rcu(ref, rcu_head);
+	spin_lock(&tfile->lock);
 }

 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
 	struct drm_hash_item *hash;
 	int ret;

-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 	ret = drm_ht_find_item(ht, key, &hash);
 	if (unlikely(ret != 0)) {
-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		return -EINVAL;
 	}
 	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
 	kref_put(&ref->kref, ttm_ref_object_release);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	struct ttm_object_file *tfile = *p_tfile;

 	*p_tfile = NULL;
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);

 	/*
 	 * Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	for (i = 0; i < TTM_REF_NUM; ++i)
 		drm_ht_remove(&tfile->ref_hash[i]);

-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
 	if (unlikely(tfile == NULL))
 		return NULL;

-	rwlock_init(&tfile->lock);
+	spin_lock_init(&tfile->lock);
 	tfile->tdev = tdev;
 	kref_init(&tfile->refcount);
 	INIT_LIST_HEAD(&tfile->ref_list);
...
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (arg->handle) {
 		struct ttm_base_object *base =
-		    ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+			ttm_base_object_lookup_for_ref(dev_priv->tdev,
+						       arg->handle);

 		if (unlikely(base == NULL)) {
 			DRM_ERROR("Fence event invalid fence object handle "
...
@@ -843,6 +843,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
+	struct vmw_private *dev_priv = vmw_priv(dev);
 	union drm_vmw_surface_reference_arg *arg =
 	    (union drm_vmw_surface_reference_arg *)data;
 	struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +855,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	struct ttm_base_object *base;
 	int ret = -EINVAL;

-	base = ttm_base_object_lookup(tfile, req->sid);
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
 	if (unlikely(base == NULL)) {
 		DRM_ERROR("Could not find surface to reference.\n");
 		return -EINVAL;
...
@@ -190,13 +190,25 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
  * @key: Hash key
  *
  * Looks up a struct ttm_base_object with the key @key.
- * Also verifies that the object is visible to the application, by
- * comparing the @tfile argument and checking the object shareable flag.
  */

 extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
 						      *tfile, uint32_t key);

+/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
 /**
  * ttm_base_object_unref
  *
@@ -218,6 +230,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
  *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
  * Adding a ref object to a base object is basically like referencing the
  * base object, but a user-space application holds the reference. When the
  * file corresponding to @tfile is closed, all its reference objects are
...
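
The documentation hunk above describes the lifecycle of ref objects. For completeness, a hypothetical counterpart to the open-side helper sketched near the top of this commit, showing how such a usage reference is dropped again when user space closes the handle (illustrative naming, not part of the patch):

/*
 * Hypothetical counterpart to example_open_handle() above; the example_*
 * name is a placeholder.
 */
static int example_close_handle(struct ttm_object_file *tfile, uint32_t handle)
{
	/*
	 * Drops the TTM_REF_USAGE ref object; releasing it also drops the
	 * reference it held on the underlying base object.
	 */
	return ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
}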