Commit 8aadeb8a authored by Zack Rusin

drm/vmwgfx: Remove the dedicated memory accounting

vmwgfx shared very elaborate memory accounting with ttm. It was moved
from ttm to vmwgfx in change
f07069da ("drm/ttm: move memory accounting into vmwgfx v4")
but because of its complexity it was hard to maintain. Some parts of the code
weren't freeing memory correctly and some were missing accounting
altogether. While those would be fairly easy to fix, the fundamental reason
for having memory accounting in the driver was the ability to invoke the
shrinker, which is part of the TTM code as well (with support for unified
memory hopefully coming soon).

That meant that vmwgfx had a lot of code that either went unused or
duplicated functionality already present in TTM. Removing this code also
prevents excessive calls to global swapout, which were common under memory
pressure because both vmwgfx and TTM would invoke the shrinker once memory
usage reached half of RAM.
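
Every conversion in this patch follows the same before/after shape; below is
a simplified sketch based on the vmw_user_shader_alloc() hunk further down
(error handling abbreviated, identifiers taken from that hunk):

  /* Before: each allocation was wrapped in TTM memory accounting. */
  struct ttm_operation_ctx ctx = {
          .interruptible = true,
          .no_wait_gpu = false
  };
  int ret;

  ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                             vmw_user_shader_size, &ctx);
  if (unlikely(ret != 0))
          return ret;

  ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
  if (unlikely(!ushader)) {
          ttm_mem_global_free(vmw_mem_glob(dev_priv),
                              vmw_user_shader_size);
          return -ENOMEM;
  }

  /* After: the accounting wrapper is gone, only the plain allocation remains. */
  ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
  if (unlikely(!ushader))
          return -ENOMEM;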

Fixes: f07069da ("drm/ttm: move memory accounting into vmwgfx v4")
Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211206172620.3139754-2-zack@kde.org
parent 21a6732f
...@@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d ...@@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o ttm_memory.o vmwgfx_system_manager.o vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o
vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
......
This diff is collapsed.
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef TTM_MEMORY_H
#define TTM_MEMORY_H
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
/**
* struct ttm_mem_global - Global memory accounting structure.
*
* @shrink: A single callback to shrink TTM memory usage. Extend this
* to a linked list to be able to handle multiple callbacks when needed.
* @swap_queue: A workqueue to handle shrinking in low memory situations. We
* need a separate workqueue since it will spend a lot of time waiting
* for the GPU, and this will otherwise block other workqueue tasks(?)
* At this point we use only a single-threaded workqueue.
* @work: The workqueue callback for the shrink queue.
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
* @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone.
* @zone_highmem: Pointer to the highmem zone if there is one.
* @zone_dma32: Pointer to the dma32 zone if there is one.
*
* Note that this structure is not per device. It should be global for all
* graphics devices.
*/
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
extern struct ttm_mem_global {
struct kobject kobj;
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
struct ttm_mem_zone *zone_kernel;
#ifdef CONFIG_HIGHMEM
struct ttm_mem_zone *zone_highmem;
#else
struct ttm_mem_zone *zone_dma32;
#endif
} ttm_mem_glob;
int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
void ttm_mem_global_release(struct ttm_mem_global *glob);
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
struct ttm_operation_ctx *ctx);
void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page, uint64_t size,
struct ttm_operation_ctx *ctx);
void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page, uint64_t size);
size_t ttm_round_pot(size_t size);
#endif
...@@ -93,10 +93,8 @@ struct ttm_object_device { ...@@ -93,10 +93,8 @@ struct ttm_object_device {
spinlock_t object_lock; spinlock_t object_lock;
struct vmwgfx_open_hash object_hash; struct vmwgfx_open_hash object_hash;
atomic_t object_count; atomic_t object_count;
struct ttm_mem_global *mem_glob;
struct dma_buf_ops ops; struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf); void (*dmabuf_release)(struct dma_buf *dma_buf);
size_t dma_buf_size;
struct idr idr; struct idr idr;
}; };
...@@ -352,11 +350,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, ...@@ -352,11 +350,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type]; struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref; struct ttm_ref_object *ref;
struct vmwgfx_hash_item *hash; struct vmwgfx_hash_item *hash;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
int ret = -EINVAL; int ret = -EINVAL;
if (base->tfile != tfile && !base->shareable) if (base->tfile != tfile && !base->shareable)
...@@ -381,13 +374,8 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, ...@@ -381,13 +374,8 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (require_existed) if (require_existed)
return -EPERM; return -EPERM;
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
&ctx);
if (unlikely(ret != 0))
return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL); ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (unlikely(ref == NULL)) { if (unlikely(ref == NULL)) {
ttm_mem_global_free(mem_glob, sizeof(*ref));
return -ENOMEM; return -ENOMEM;
} }
...@@ -412,7 +400,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, ...@@ -412,7 +400,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
spin_unlock(&tfile->lock); spin_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL); BUG_ON(ret != -EINVAL);
ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref); kfree(ref);
} }
...@@ -427,7 +414,6 @@ ttm_ref_object_release(struct kref *kref) ...@@ -427,7 +414,6 @@ ttm_ref_object_release(struct kref *kref)
struct ttm_base_object *base = ref->obj; struct ttm_base_object *base = ref->obj;
struct ttm_object_file *tfile = ref->tfile; struct ttm_object_file *tfile = ref->tfile;
struct vmwgfx_open_hash *ht; struct vmwgfx_open_hash *ht;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
ht = &tfile->ref_hash[ref->ref_type]; ht = &tfile->ref_hash[ref->ref_type];
(void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash); (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
...@@ -438,7 +424,6 @@ ttm_ref_object_release(struct kref *kref) ...@@ -438,7 +424,6 @@ ttm_ref_object_release(struct kref *kref)
base->ref_obj_release(base, ref->ref_type); base->ref_obj_release(base, ref->ref_type);
ttm_base_object_unref(&ref->obj); ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree_rcu(ref, rcu_head); kfree_rcu(ref, rcu_head);
spin_lock(&tfile->lock); spin_lock(&tfile->lock);
} }
...@@ -526,8 +511,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, ...@@ -526,8 +511,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
} }
struct ttm_object_device * struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob, ttm_object_device_init(unsigned int hash_order,
unsigned int hash_order,
const struct dma_buf_ops *ops) const struct dma_buf_ops *ops)
{ {
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
...@@ -536,7 +520,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, ...@@ -536,7 +520,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (unlikely(tdev == NULL)) if (unlikely(tdev == NULL))
return NULL; return NULL;
tdev->mem_glob = mem_glob;
spin_lock_init(&tdev->object_lock); spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0); atomic_set(&tdev->object_count, 0);
ret = vmwgfx_ht_create(&tdev->object_hash, hash_order); ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
...@@ -547,8 +530,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, ...@@ -547,8 +530,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
tdev->ops = *ops; tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release; tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release; tdev->ops.release = ttm_prime_dmabuf_release;
tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
ttm_round_pot(sizeof(struct file));
return tdev; return tdev;
out_no_object_hash: out_no_object_hash:
...@@ -633,7 +614,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) ...@@ -633,7 +614,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
if (prime->dma_buf == dma_buf) if (prime->dma_buf == dma_buf)
prime->dma_buf = NULL; prime->dma_buf = NULL;
mutex_unlock(&prime->mutex); mutex_unlock(&prime->mutex);
ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
ttm_base_object_unref(&base); ttm_base_object_unref(&base);
} }
...@@ -715,30 +695,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, ...@@ -715,30 +695,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf; dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) { if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info); DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false
};
exp_info.ops = &tdev->ops; exp_info.ops = &tdev->ops;
exp_info.size = prime->size; exp_info.size = prime->size;
exp_info.flags = flags; exp_info.flags = flags;
exp_info.priv = prime; exp_info.priv = prime;
/* /*
* Need to create a new dma_buf, with memory accounting. * Need to create a new dma_buf
*/ */
ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
&ctx);
if (unlikely(ret != 0)) {
mutex_unlock(&prime->mutex);
goto out_unref;
}
dma_buf = dma_buf_export(&exp_info); dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) { if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf); ret = PTR_ERR(dma_buf);
ttm_mem_global_free(tdev->mem_glob,
tdev->dma_buf_size);
mutex_unlock(&prime->mutex); mutex_unlock(&prime->mutex);
goto out_unref; goto out_unref;
} }
......
...@@ -42,7 +42,6 @@ ...@@ -42,7 +42,6 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include "ttm_memory.h"
#include "vmwgfx_hashtab.h" #include "vmwgfx_hashtab.h"
/** /**
...@@ -296,7 +295,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); ...@@ -296,7 +295,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/** /**
* ttm_object device init - initialize a struct ttm_object_device * ttm_object device init - initialize a struct ttm_object_device
* *
* @mem_glob: struct ttm_mem_global for memory accounting.
* @hash_order: Order of hash table used to hash the base objects. * @hash_order: Order of hash table used to hash the base objects.
* @ops: DMA buf ops for prime objects of this device. * @ops: DMA buf ops for prime objects of this device.
* *
...@@ -305,8 +303,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); ...@@ -305,8 +303,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
*/ */
extern struct ttm_object_device * extern struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob, ttm_object_device_init(unsigned int hash_order,
unsigned int hash_order,
const struct dma_buf_ops *ops); const struct dma_buf_ops *ops);
/** /**
...@@ -352,13 +349,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, ...@@ -352,13 +349,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \ #define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead) kfree_rcu(__obj, __prime.base.rhead)
/*
* Extra memory required by the base object's idr storage, which is allocated
* separately from the base object itself. We estimate an on-average 128 bytes
* per idr.
*/
#define TTM_OBJ_EXTRA_SIZE 128
struct ttm_base_object * struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key); ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
......
...@@ -1327,8 +1327,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) ...@@ -1327,8 +1327,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
} }
/** /**
* vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
* memory accounting.
* *
* @dev_priv: Pointer to a device private structure. * @dev_priv: Pointer to a device private structure.
* *
...@@ -1338,20 +1337,9 @@ struct vmw_ctx_binding_state * ...@@ -1338,20 +1337,9 @@ struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv) vmw_binding_state_alloc(struct vmw_private *dev_priv)
{ {
struct vmw_ctx_binding_state *cbs; struct vmw_ctx_binding_state *cbs;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
int ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
&ctx);
if (ret)
return ERR_PTR(ret);
cbs = vzalloc(sizeof(*cbs)); cbs = vzalloc(sizeof(*cbs));
if (!cbs) { if (!cbs) {
ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
...@@ -1362,17 +1350,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv) ...@@ -1362,17 +1350,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv)
} }
/** /**
* vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
* memory accounting info.
* *
* @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
*/ */
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{ {
struct vmw_private *dev_priv = cbs->dev_priv;
vfree(cbs); vfree(cbs);
ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
} }
/** /**
......
...@@ -391,39 +391,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo) ...@@ -391,39 +391,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo)
} }
/**
* vmw_bo_acc_size - Calculate the pinned memory usage of buffers
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @size: The requested buffer size.
* @user: Whether this is an ordinary dma buffer or a user dma buffer.
*/
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
bool user)
{
static size_t struct_size, user_struct_size;
size_t num_pages = PFN_UP(size);
size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
if (unlikely(struct_size == 0)) {
size_t backend_size = ttm_round_pot(vmw_tt_size);
struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_buffer_object));
user_struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
TTM_OBJ_EXTRA_SIZE;
}
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
page_array_size +=
ttm_round_pot(num_pages * sizeof(dma_addr_t));
return ((user) ? user_struct_size : struct_size) +
page_array_size;
}
/** /**
* vmw_bo_bo_free - vmw buffer object destructor * vmw_bo_bo_free - vmw buffer object destructor
* *
...@@ -471,24 +438,17 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, ...@@ -471,24 +438,17 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
struct ttm_placement *placement, struct ttm_placement *placement,
struct ttm_buffer_object **p_bo) struct ttm_buffer_object **p_bo)
{ {
struct ttm_operation_ctx ctx = { false, false }; struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
struct ttm_buffer_object *bo; struct ttm_buffer_object *bo;
size_t acc_size;
int ret; int ret;
bo = kzalloc(sizeof(*bo), GFP_KERNEL); bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(!bo)) if (unlikely(!bo))
return -ENOMEM; return -ENOMEM;
acc_size = ttm_round_pot(sizeof(*bo));
acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
acc_size += ttm_round_pot(sizeof(struct ttm_tt));
ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
if (unlikely(ret))
goto error_free;
bo->base.size = size; bo->base.size = size;
dma_resv_init(&bo->base._resv); dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node); drm_vma_node_reset(&bo->base.vma_node);
...@@ -497,7 +457,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, ...@@ -497,7 +457,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
ttm_bo_type_kernel, placement, 0, ttm_bo_type_kernel, placement, 0,
&ctx, NULL, NULL, NULL); &ctx, NULL, NULL, NULL);
if (unlikely(ret)) if (unlikely(ret))
goto error_account; goto error_free;
ttm_bo_pin(bo); ttm_bo_pin(bo);
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
...@@ -505,9 +465,6 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, ...@@ -505,9 +465,6 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
return 0; return 0;
error_account:
ttm_mem_global_free(&ttm_mem_glob, acc_size);
error_free: error_free:
kfree(bo); kfree(bo);
return ret; return ret;
...@@ -533,23 +490,20 @@ int vmw_bo_init(struct vmw_private *dev_priv, ...@@ -533,23 +490,20 @@ int vmw_bo_init(struct vmw_private *dev_priv,
bool interruptible, bool pin, bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo)) void (*bo_free)(struct ttm_buffer_object *bo))
{ {
struct ttm_operation_ctx ctx = { interruptible, false }; struct ttm_operation_ctx ctx = {
.interruptible = interruptible,
.no_wait_gpu = false
};
struct ttm_device *bdev = &dev_priv->bdev; struct ttm_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret; int ret;
bool user = (bo_free == &vmw_user_bo_destroy); bool user = (bo_free == &vmw_user_bo_destroy);
WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free))); WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
acc_size = vmw_bo_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo)); memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3; vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT; vmw_bo->res_tree = RB_ROOT;
ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
if (unlikely(ret))
return ret;
vmw_bo->base.base.size = size; vmw_bo->base.base.size = size;
dma_resv_init(&vmw_bo->base.base._resv); dma_resv_init(&vmw_bo->base.base._resv);
...@@ -559,7 +513,6 @@ int vmw_bo_init(struct vmw_private *dev_priv, ...@@ -559,7 +513,6 @@ int vmw_bo_init(struct vmw_private *dev_priv,
ttm_bo_type_device, placement, ttm_bo_type_device, placement,
0, &ctx, NULL, NULL, bo_free); 0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) { if (unlikely(ret)) {
ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret; return ret;
} }
......
...@@ -324,22 +324,3 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) ...@@ -324,22 +324,3 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
kfree(man); kfree(man);
} }
/**
* vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
* resource manager
*
* Returns the approximate allocation size of a command buffer managed
* resource manager.
*/
size_t vmw_cmdbuf_res_man_size(void)
{
static size_t res_man_size;
if (unlikely(res_man_size == 0))
res_man_size =
ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
ttm_round_pot(sizeof(struct hlist_head) <<
VMW_CMDBUF_RES_MAN_HT_ORDER);
return res_man_size;
}
...@@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res, ...@@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf); struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res); static int vmw_dx_context_destroy(struct vmw_resource *res);
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = { static const struct vmw_user_resource_conv user_context_conv = {
.object_type = VMW_RES_CONTEXT, .object_type = VMW_RES_CONTEXT,
.base_obj_to_res = vmw_user_context_base_to_res, .base_obj_to_res = vmw_user_context_base_to_res,
...@@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res) ...@@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
{ {
struct vmw_user_context *ctx = struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res); container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
if (ctx->cbs) if (ctx->cbs)
vmw_binding_state_free(ctx->cbs); vmw_binding_state_free(ctx->cbs);
...@@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res) ...@@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
(void) vmw_context_bind_dx_query(res, NULL); (void) vmw_context_bind_dx_query(res, NULL);
ttm_base_object_kfree(ctx, base); ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
} }
/* /*
...@@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data, ...@@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data,
struct vmw_resource *tmp; struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_operation_ctx ttm_opt_ctx = {
.interruptible = true,
.no_wait_gpu = false
};
int ret; int ret;
if (!has_sm4_context(dev_priv) && dx) { if (!has_sm4_context(dev_priv) && dx) {
...@@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data, ...@@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
if (unlikely(vmw_user_context_size == 0))
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_context_size,
&ttm_opt_ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for context"
" creation.\n");
goto out_ret;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (unlikely(!ctx)) { if (unlikely(!ctx)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM; ret = -ENOMEM;
goto out_ret; goto out_ret;
} }
......
...@@ -546,8 +546,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res) ...@@ -546,8 +546,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res)
(void) vmw_cotable_destroy(res); (void) vmw_cotable_destroy(res);
} }
static size_t cotable_acc_size;
/** /**
* vmw_cotable_free - Cotable resource destructor * vmw_cotable_free - Cotable resource destructor
* *
...@@ -555,10 +553,7 @@ static size_t cotable_acc_size; ...@@ -555,10 +553,7 @@ static size_t cotable_acc_size;
*/ */
static void vmw_cotable_free(struct vmw_resource *res) static void vmw_cotable_free(struct vmw_resource *res)
{ {
struct vmw_private *dev_priv = res->dev_priv;
kfree(res); kfree(res);
ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
} }
/** /**
...@@ -574,21 +569,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, ...@@ -574,21 +569,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
u32 type) u32 type)
{ {
struct vmw_cotable *vcotbl; struct vmw_cotable *vcotbl;
struct ttm_operation_ctx ttm_opt_ctx = {
.interruptible = true,
.no_wait_gpu = false
};
int ret; int ret;
u32 num_entries; u32 num_entries;
if (unlikely(cotable_acc_size == 0))
cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
cotable_acc_size, &ttm_opt_ctx);
if (unlikely(ret))
return ERR_PTR(ret);
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
if (unlikely(!vcotbl)) { if (unlikely(!vcotbl)) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -622,7 +605,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, ...@@ -622,7 +605,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
out_no_init: out_no_init:
kfree(vcotbl); kfree(vcotbl);
out_no_alloc: out_no_alloc:
ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
......
...@@ -50,9 +50,6 @@ ...@@ -50,9 +50,6 @@
#define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600 #define VMW_MIN_INITIAL_HEIGHT 600
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
/* /*
* Fully encoded drm commands. Might move to vmw_drm.h * Fully encoded drm commands. Might move to vmw_drm.h
*/ */
...@@ -986,8 +983,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) ...@@ -986,8 +983,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_err0; goto out_err0;
} }
dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
&vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) { if (unlikely(dev_priv->tdev == NULL)) {
drm_err(&dev_priv->drm, drm_err(&dev_priv->drm,
...@@ -1083,8 +1079,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) ...@@ -1083,8 +1079,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->sm_type = VMW_SM_4; dev_priv->sm_type = VMW_SM_4;
} }
vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
if (has_sm4_context(dev_priv) && if (has_sm4_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX2)) { (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
...@@ -1397,7 +1391,6 @@ static void vmw_remove(struct pci_dev *pdev) ...@@ -1397,7 +1391,6 @@ static void vmw_remove(struct pci_dev *pdev)
{ {
struct drm_device *dev = pci_get_drvdata(pdev); struct drm_device *dev = pci_get_drvdata(pdev);
ttm_mem_global_release(&ttm_mem_glob);
drm_dev_unregister(dev); drm_dev_unregister(dev);
vmw_driver_unload(dev); vmw_driver_unload(dev);
} }
...@@ -1641,13 +1634,9 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1641,13 +1634,9 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, &vmw->drm); pci_set_drvdata(pdev, &vmw->drm);
ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
if (ret)
goto out_error;
ret = vmw_driver_load(vmw, ent->device); ret = vmw_driver_load(vmw, ent->device);
if (ret) if (ret)
goto out_release; goto out_error;
ret = drm_dev_register(&vmw->drm, 0); ret = drm_dev_register(&vmw->drm, 0);
if (ret) if (ret)
...@@ -1656,8 +1645,6 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1656,8 +1645,6 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0; return 0;
out_unload: out_unload:
vmw_driver_unload(&vmw->drm); vmw_driver_unload(&vmw->drm);
out_release:
ttm_mem_global_release(&ttm_mem_glob);
out_error: out_error:
return ret; return ret;
} }
......
...@@ -628,9 +628,6 @@ struct vmw_private { ...@@ -628,9 +628,6 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman; struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
/* Validation memory reservation */
struct vmw_validation_mem vvm;
uint32 *devcaps; uint32 *devcaps;
/* /*
...@@ -1028,9 +1025,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) ...@@ -1028,9 +1025,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
size_t gran);
/** /**
* TTM buffer object driver - vmwgfx_ttm_buffer.c * TTM buffer object driver - vmwgfx_ttm_buffer.c
*/ */
...@@ -1328,18 +1322,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1328,18 +1322,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
SVGA3dSurfaceAllFlags svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
SVGA3dMSPattern multisample_pattern,
SVGA3dMSQualityLevel quality_level,
struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
void *data, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
...@@ -1348,7 +1330,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, ...@@ -1348,7 +1330,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
struct drm_file *file_priv); struct drm_file *file_priv);
int vmw_gb_surface_define(struct vmw_private *dev_priv, int vmw_gb_surface_define(struct vmw_private *dev_priv,
uint32_t user_accounting_size,
const struct vmw_surface_metadata *req, const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out); struct vmw_surface **srf_out);
...@@ -1409,7 +1390,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv, ...@@ -1409,7 +1390,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
extern struct vmw_cmdbuf_res_manager * extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv); vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man); extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource * extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type, enum vmw_cmdbuf_res_type res_type,
...@@ -1606,11 +1586,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf) ...@@ -1606,11 +1586,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
return buf; return buf;
} }
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
return &ttm_mem_glob;
}
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{ {
atomic_inc(&dev_priv->num_fifo_resources); atomic_inc(&dev_priv->num_fifo_resources);
......
...@@ -4054,8 +4054,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4054,8 +4054,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL; struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC); out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) { if (out_fence_fd < 0) {
......
...@@ -37,9 +37,6 @@ struct vmw_fence_manager { ...@@ -37,9 +37,6 @@ struct vmw_fence_manager {
spinlock_t lock; spinlock_t lock;
struct list_head fence_list; struct list_head fence_list;
struct work_struct work; struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
bool fifo_down; bool fifo_down;
struct list_head cleanup_list; struct list_head cleanup_list;
uint32_t pending_actions[VMW_ACTION_MAX]; uint32_t pending_actions[VMW_ACTION_MAX];
...@@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) ...@@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->cleanup_list); INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func); INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true; fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
TTM_OBJ_EXTRA_SIZE;
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex); mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1); fman->ctx = dma_fence_context_alloc(1);
...@@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) ...@@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{ {
struct vmw_user_fence *ufence = struct vmw_user_fence *ufence =
container_of(fence, struct vmw_user_fence, fence); container_of(fence, struct vmw_user_fence, fence);
struct vmw_fence_manager *fman = fman_from_fence(fence);
ttm_base_object_kfree(ufence, base); ttm_base_object_kfree(ufence, base);
/*
* Free kernel space accounting.
*/
ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
fman->user_fence_size);
} }
static void vmw_user_fence_base_release(struct ttm_base_object **p_base) static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
...@@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv, ...@@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_fence *ufence; struct vmw_user_fence *ufence;
struct vmw_fence_obj *tmp; struct vmw_fence_obj *tmp;
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
int ret; int ret;
/*
* Kernel memory space accounting, since this object may
* be created by a user-space request.
*/
ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
&ctx);
if (unlikely(ret != 0))
return ret;
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
if (unlikely(!ufence)) { if (unlikely(!ufence)) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -646,7 +617,6 @@ int vmw_user_fence_create(struct drm_file *file_priv, ...@@ -646,7 +617,6 @@ int vmw_user_fence_create(struct drm_file *file_priv,
tmp = &ufence->fence; tmp = &ufence->fence;
vmw_fence_obj_unreference(&tmp); vmw_fence_obj_unreference(&tmp);
out_no_object: out_no_object:
ttm_mem_global_free(mem_glob, fman->user_fence_size);
return ret; return ret;
} }
......
...@@ -1188,7 +1188,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev, ...@@ -1188,7 +1188,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
metadata.base_size.depth = 1; metadata.base_size.depth = 1;
metadata.scanout = true; metadata.scanout = true;
ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out); ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
if (ret) { if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n"); DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret; return ret;
......
...@@ -413,10 +413,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) ...@@ -413,10 +413,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
* @mob: Pointer to the mob the pagetable of which we want to * @mob: Pointer to the mob the pagetable of which we want to
* populate. * populate.
* *
* This function allocates memory to be used for the pagetable, and * This function allocates memory to be used for the pagetable.
* adjusts TTM memory accounting accordingly. Returns ENOMEM if * Returns ENOMEM if memory resources aren't sufficient and may
* memory resources aren't sufficient and may cause TTM buffer objects * cause TTM buffer objects to be swapped out.
* to be swapped out by using the TTM memory accounting function.
*/ */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv, static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob) struct vmw_mob *mob)
......
...@@ -57,7 +57,6 @@ enum vmw_bo_dirty_method { ...@@ -57,7 +57,6 @@ enum vmw_bo_dirty_method {
* @ref_count: Reference count for this structure * @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the * @bitmap_size: The size of the bitmap in bits. Typically equal to the
* nuber of pages in the bo. * nuber of pages in the bo.
* @size: The accounting size for this struct.
* @bitmap: A bitmap where each bit represents a page. A set bit means a * @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page. * dirty page.
*/ */
...@@ -68,7 +67,6 @@ struct vmw_bo_dirty { ...@@ -68,7 +67,6 @@ struct vmw_bo_dirty {
unsigned int change_count; unsigned int change_count;
unsigned int ref_count; unsigned int ref_count;
unsigned long bitmap_size; unsigned long bitmap_size;
size_t size;
unsigned long bitmap[]; unsigned long bitmap[];
}; };
...@@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) ...@@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{ {
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t num_pages = vbo->base.resource->num_pages; pgoff_t num_pages = vbo->base.resource->num_pages;
size_t size, acc_size; size_t size;
int ret; int ret;
static struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
if (dirty) { if (dirty) {
dirty->ref_count++; dirty->ref_count++;
...@@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) ...@@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
} }
size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
acc_size = ttm_round_pot(size);
ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
if (ret) {
VMW_DEBUG_USER("Out of graphics memory for buffer object "
"dirty tracker.\n");
return ret;
}
dirty = kvzalloc(size, GFP_KERNEL); dirty = kvzalloc(size, GFP_KERNEL);
if (!dirty) { if (!dirty) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_no_dirty; goto out_no_dirty;
} }
dirty->size = acc_size;
dirty->bitmap_size = num_pages; dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size; dirty->start = dirty->bitmap_size;
dirty->end = 0; dirty->end = 0;
...@@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) ...@@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
return 0; return 0;
out_no_dirty: out_no_dirty:
ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret; return ret;
} }
...@@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) ...@@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
if (dirty && --dirty->ref_count == 0) { if (dirty && --dirty->ref_count == 0) {
size_t acc_size = dirty->size;
kvfree(dirty); kvfree(dirty);
ttm_mem_global_free(&ttm_mem_glob, acc_size);
vbo->dirty = NULL; vbo->dirty = NULL;
} }
} }
......
...@@ -53,10 +53,6 @@ struct vmw_dx_shader { ...@@ -53,10 +53,6 @@ struct vmw_dx_shader {
struct list_head cotable_head; struct list_head cotable_head;
}; };
static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;
static size_t vmw_shader_dx_size;
static void vmw_user_shader_free(struct vmw_resource *res); static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource * static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base); vmw_user_shader_base_to_res(struct ttm_base_object *base);
...@@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res, ...@@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state); enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type); static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type); static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
static uint64_t vmw_user_shader_size;
static const struct vmw_user_resource_conv user_shader_conv = { static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER, .object_type = VMW_RES_SHADER,
...@@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, ...@@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
* *
* @res: The shader resource * @res: The shader resource
* *
* Frees the DX shader resource and updates memory accounting. * Frees the DX shader resource.
*/ */
static void vmw_dx_shader_res_free(struct vmw_resource *res) static void vmw_dx_shader_res_free(struct vmw_resource *res)
{ {
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
vmw_resource_unreference(&shader->cotable); vmw_resource_unreference(&shader->cotable);
kfree(shader); kfree(shader);
ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
} }
/** /**
...@@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, ...@@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_shader *shader; struct vmw_dx_shader *shader;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv; struct vmw_private *dev_priv = ctx->dev_priv;
struct ttm_operation_ctx ttm_opt_ctx = {
.interruptible = true,
.no_wait_gpu = false
};
int ret; int ret;
if (!vmw_shader_dx_size)
vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
if (!vmw_shader_id_ok(user_key, shader_type)) if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL; return -EINVAL;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
&ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
"creation.\n");
return ret;
}
shader = kmalloc(sizeof(*shader), GFP_KERNEL); shader = kmalloc(sizeof(*shader), GFP_KERNEL);
if (!shader) { if (!shader) {
ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
return -ENOMEM; return -ENOMEM;
} }
...@@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res) ...@@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res)
{ {
struct vmw_user_shader *ushader = struct vmw_user_shader *ushader =
container_of(res, struct vmw_user_shader, shader.res); container_of(res, struct vmw_user_shader, shader.res);
struct vmw_private *dev_priv = res->dev_priv;
ttm_base_object_kfree(ushader, base); ttm_base_object_kfree(ushader, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
} }
static void vmw_shader_free(struct vmw_resource *res) static void vmw_shader_free(struct vmw_resource *res)
{ {
struct vmw_shader *shader = vmw_res_to_shader(res); struct vmw_shader *shader = vmw_res_to_shader(res);
struct vmw_private *dev_priv = res->dev_priv;
kfree(shader); kfree(shader);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_shader_size);
} }
/* /*
...@@ -722,31 +692,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, ...@@ -722,31 +692,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
{ {
struct vmw_user_shader *ushader; struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp; struct vmw_resource *res, *tmp;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false
};
int ret; int ret;
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size =
ttm_round_pot(sizeof(struct vmw_user_shader)) +
VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
&ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
"creation.\n");
goto out;
}
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(!ushader)) { if (unlikely(!ushader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -793,31 +742,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, ...@@ -793,31 +742,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
{ {
struct vmw_shader *shader; struct vmw_shader *shader;
struct vmw_resource *res; struct vmw_resource *res;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false
};
int ret; int ret;
if (unlikely(vmw_shader_size == 0))
vmw_shader_size =
ttm_round_pot(sizeof(struct vmw_shader)) +
VMW_IDA_ACC_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size,
&ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
"creation.\n");
goto out_err;
}
shader = kzalloc(sizeof(*shader), GFP_KERNEL); shader = kzalloc(sizeof(*shader), GFP_KERNEL);
if (unlikely(!shader)) { if (unlikely(!shader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_shader_size);
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_err;
} }
......
...@@ -32,12 +32,10 @@ ...@@ -32,12 +32,10 @@
* struct vmw_user_simple_resource - User-space simple resource struct * struct vmw_user_simple_resource - User-space simple resource struct
* *
* @base: The TTM base object implementing user-space visibility. * @base: The TTM base object implementing user-space visibility.
* @account_size: How much memory was accounted for this object.
* @simple: The embedded struct vmw_simple_resource. * @simple: The embedded struct vmw_simple_resource.
*/ */
struct vmw_user_simple_resource { struct vmw_user_simple_resource {
struct ttm_base_object base; struct ttm_base_object base;
size_t account_size;
struct vmw_simple_resource simple; struct vmw_simple_resource simple;
/* /*
* Nothing to be placed after @simple, since size of @simple is * Nothing to be placed after @simple, since size of @simple is
...@@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv, ...@@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
* *
* @res: The struct vmw_resource member of the simple resource object. * @res: The struct vmw_resource member of the simple resource object.
* *
* Frees memory and memory accounting for the object. * Frees memory for the object.
*/ */
static void vmw_simple_resource_free(struct vmw_resource *res) static void vmw_simple_resource_free(struct vmw_resource *res)
{ {
struct vmw_user_simple_resource *usimple = struct vmw_user_simple_resource *usimple =
container_of(res, struct vmw_user_simple_resource, container_of(res, struct vmw_user_simple_resource,
simple.res); simple.res);
struct vmw_private *dev_priv = res->dev_priv;
size_t size = usimple->account_size;
ttm_base_object_kfree(usimple, base); ttm_base_object_kfree(usimple, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
} }
/** /**
...@@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, ...@@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_resource *tmp; struct vmw_resource *tmp;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false
};
size_t alloc_size; size_t alloc_size;
size_t account_size;
int ret; int ret;
alloc_size = offsetof(struct vmw_user_simple_resource, simple) + alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size; func->size;
account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
TTM_OBJ_EXTRA_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
&ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for %s"
" creation.\n", func->res_func.type_name);
goto out_ret;
}
usimple = kzalloc(alloc_size, GFP_KERNEL); usimple = kzalloc(alloc_size, GFP_KERNEL);
if (!usimple) { if (!usimple) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
account_size);
ret = -ENOMEM; ret = -ENOMEM;
goto out_ret; goto out_ret;
} }
usimple->simple.func = func; usimple->simple.func = func;
usimple->account_size = account_size;
res = &usimple->simple.res; res = &usimple->simple.res;
usimple->base.shareable = false; usimple->base.shareable = false;
usimple->base.tfile = NULL; usimple->base.tfile = NULL;
......
...@@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) ...@@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
* *
* @res: Pointer to a struct vmw_resource * @res: Pointer to a struct vmw_resource
* *
* Frees memory and memory accounting held by a struct vmw_view. * Frees memory held by the struct vmw_view.
*/ */
static void vmw_view_res_free(struct vmw_resource *res) static void vmw_view_res_free(struct vmw_resource *res)
{ {
struct vmw_view *view = vmw_view(res); struct vmw_view *view = vmw_view(res);
size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
struct vmw_private *dev_priv = res->dev_priv;
vmw_resource_unreference(&view->cotable); vmw_resource_unreference(&view->cotable);
vmw_resource_unreference(&view->srf); vmw_resource_unreference(&view->srf);
kfree_rcu(view, rcu); kfree_rcu(view, rcu);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
} }
/** /**
...@@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, ...@@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_private *dev_priv = ctx->dev_priv; struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_view *view; struct vmw_view *view;
struct ttm_operation_ctx ttm_opt_ctx = {
.interruptible = true,
.no_wait_gpu = false
};
size_t size; size_t size;
int ret; int ret;
...@@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, ...@@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
size = offsetof(struct vmw_view, cmd) + cmd_size; size = offsetof(struct vmw_view, cmd) + cmd_size;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for view creation\n");
return ret;
}
view = kmalloc(size, GFP_KERNEL); view = kmalloc(size, GFP_KERNEL);
if (!view) { if (!view) {
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
} }
if (!vps->surf) { if (!vps->surf) {
ret = vmw_gb_surface_define(dev_priv, 0, &metadata, ret = vmw_gb_surface_define(dev_priv, &metadata,
&vps->surf); &vps->surf);
if (ret != 0) { if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n"); DRM_ERROR("Couldn't allocate STDU surface.\n");
......
...@@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, ...@@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state); enum vmw_cmdbuf_res_state state);
static size_t vmw_streamoutput_size;
static const struct vmw_res_func vmw_dx_streamoutput_func = { static const struct vmw_res_func vmw_dx_streamoutput_func = {
.res_type = vmw_res_streamoutput, .res_type = vmw_res_streamoutput,
.needs_backup = true, .needs_backup = true,
...@@ -254,12 +252,10 @@ vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man, ...@@ -254,12 +252,10 @@ vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
static void vmw_dx_streamoutput_res_free(struct vmw_resource *res) static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
{ {
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
vmw_resource_unreference(&so->cotable); vmw_resource_unreference(&so->cotable);
kfree(so); kfree(so);
ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
} }
static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res) static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
...@@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man, ...@@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_streamoutput *so; struct vmw_dx_streamoutput *so;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv; struct vmw_private *dev_priv = ctx->dev_priv;
struct ttm_operation_ctx ttm_opt_ctx = {
.interruptible = true,
.no_wait_gpu = false
};
int ret; int ret;
if (!vmw_streamoutput_size)
vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_streamoutput_size, &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for streamout.\n");
return ret;
}
so = kmalloc(sizeof(*so), GFP_KERNEL); so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so) { if (!so) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_streamoutput_size);
return -ENOMEM; return -ENOMEM;
} }
......
@@ -45,14 +45,12 @@
* @prime: The TTM prime object.
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
* @master: Master of the creating client. Used for security check.
* @backup_base: The TTM base object of the backup buffer.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
struct drm_master *master;
struct ttm_base_object *backup_base;
};
@@ -74,13 +72,11 @@ struct vmw_surface_offset {
/**
* struct vmw_surface_dirty - Surface dirty-tracker
* @cache: Cached layout information of the surface.
* @size: Accounting size for the struct vmw_surface_dirty.
* @num_subres: Number of subresources.
* @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
*/
struct vmw_surface_dirty {
struct vmw_surface_cache cache;
size_t size;
u32 num_subres;
SVGA3dBox boxes[];
};
@@ -129,9 +125,6 @@ static const struct vmw_user_resource_conv user_surface_conv = {
const struct vmw_user_resource_conv *user_surface_converter =
&user_surface_conv;
static uint64_t vmw_user_surface_size;
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = false,
@@ -359,7 +352,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
* vmw_surface.
*
* Destroys a the device surface associated with a struct vmw_surface if
* any, and adjusts accounting and resource count accordingly.
* any, and adjusts resource count accordingly.
*/
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
@@ -666,8 +659,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_surface *srf = vmw_res_to_srf(res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size;
WARN_ON_ONCE(res->dirty);
if (user_srf->master)
@@ -676,7 +667,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->metadata.sizes);
kfree(srf->snooper.image);
ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -740,23 +730,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false
};
int ret;
int i, j;
uint32_t cur_bo_offset;
struct drm_vmw_size *cur_size;
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
uint32_t size;
const SVGA3dSurfaceDesc *desc;
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
@@ -768,10 +749,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size +
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
desc = vmw_surface_get_desc(req->format);
if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
@@ -779,18 +756,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface.\n");
goto out_unlock;
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
goto out_unlock;
}
srf = &user_srf->srf;
@@ -805,7 +774,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(metadata->mip_levels, req->mip_levels,
sizeof(metadata->mip_levels));
metadata->num_sizes = num_sizes;
user_srf->size = size;
metadata->sizes =
memdup_user((struct drm_vmw_size __user *)(unsigned long)
req->size_addr,
@@ -916,8 +884,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
kfree(metadata->sizes);
out_no_sizes:
ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
return ret;
}
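In vmw_surface_define_ioctl() the up-front accounting was the first rung of the goto-unwind ladder, so dropping it also drops the out_no_user_srf label whose only job was to return the accounted size; the failure branch now jumps straight to out_unlock. A small self-contained sketch of that unwind idiom, with hypothetical step names, shows why each fallible setup step owns exactly one cleanup label:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Kernel-style "goto unwind": every setup step that can fail gets a label
 * that undoes only what was acquired before it. Removing a setup step
 * (here, an accounting reservation) removes its matching label as well. */
static int create_object(size_t payload)
{
	char *meta, *data;
	int ret;

	meta = malloc(64);
	if (!meta) {
		ret = -1;
		goto out;               /* nothing acquired yet */
	}

	data = malloc(payload);
	if (!data) {
		ret = -1;
		goto out_free_meta;     /* undo only the first step */
	}

	memset(data, 0, payload);
	free(data);
	free(meta);
	return 0;

out_free_meta:
	free(meta);
out:
	return ret;
}

int main(void)
{
	printf("create_object: %d\n", create_object(256));
	return 0;
}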
@@ -1459,7 +1425,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
struct vmw_resource *res;
struct vmw_resource *tmp;
int ret = 0;
uint32_t size;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
@@ -1506,12 +1471,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
return -EINVAL;
}
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
size = vmw_user_surface_size;
metadata.flags = svga3d_flags_64;
metadata.format = req->base.format;
metadata.mip_levels[0] = req->base.mip_levels;
@@ -1526,7 +1485,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vmw_surface_flag_scanout;
/* Define a surface based on the parameters. */
ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
if (ret != 0) {
VMW_DEBUG_USER("Failed to define surface.\n");
return ret;
@@ -1955,11 +1914,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
u32 num_mip;
u32 num_subres;
u32 num_samples;
size_t dirty_size, acc_size;
size_t dirty_size;
static struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
int ret;
if (metadata->array_size)
@@ -1973,14 +1928,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_subres = num_layers * num_mip;
dirty_size = struct_size(dirty, boxes, num_subres);
acc_size = ttm_round_pot(dirty_size);
ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
acc_size, &ctx);
if (ret) {
VMW_DEBUG_USER("Out of graphics memory for surface "
"dirty tracker.\n");
return ret;
}
dirty = kvzalloc(dirty_size, GFP_KERNEL);
if (!dirty) {
@@ -1990,13 +1937,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_samples = max_t(u32, 1, metadata->multisample_count);
ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
num_mip, num_layers, num_samples,
&dirty->cache);
if (ret)
goto out_no_cache;
dirty->num_subres = num_subres;
dirty->size = acc_size;
res->dirty = (struct vmw_resource_dirty *) dirty;
return 0;
@@ -2004,7 +1950,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
out_no_cache:
kvfree(dirty);
out_no_dirty:
ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
return ret;
}
@@ -2015,10 +1960,8 @@ static void vmw_surface_dirty_free(struct vmw_resource *res)
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
size_t acc_size = dirty->size;
kvfree(dirty);
ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
res->dirty = NULL;
}
@@ -2051,8 +1994,6 @@ static int vmw_surface_clean(struct vmw_resource *res)
* vmw_gb_surface_define - Define a private GB surface
*
* @dev_priv: Pointer to a device private.
* @user_accounting_size: Used to track user-space memory usage, set
* to 0 for kernel mode only memory
* @metadata: Metadata representing the surface to create.
* @user_srf_out: allocated user_srf. Set to NULL on failure.
*
@@ -2062,17 +2003,12 @@ static int vmw_surface_clean(struct vmw_resource *res)
* it available to user mode drivers.
*/
int vmw_gb_surface_define(struct vmw_private *dev_priv,
uint32_t user_accounting_size,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata *metadata;
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false
};
u32 sample_count = 1;
u32 num_layers = 1;
int ret;
@@ -2113,22 +2049,13 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (req->sizes != NULL)
return -EINVAL;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
user_accounting_size, &ctx);
if (ret != 0) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface.\n");
goto out_unlock;
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
goto out_unlock;
}
*srf_out = &user_srf->srf;
user_srf->size = user_accounting_size;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
@@ -2179,9 +2106,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
return ret;
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
out_unlock:
return ret;
}
@@ -175,7 +175,6 @@ struct vmw_ttm_tt {
int mem_type;
struct sg_table sgt;
struct vmw_sg_table vsgt;
uint64_t sg_alloc_size;
bool mapped;
bool bound;
};
@@ -300,17 +299,10 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false
};
struct vmw_piter iter;
dma_addr_t old;
int ret = 0;
static size_t sgl_size;
static size_t sgt_size;
if (vmw_tt->mapped)
return 0;
@@ -324,15 +316,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
if (unlikely(!sgl_size)) {
sgl_size = ttm_round_pot(sizeof(struct scatterlist));
sgt_size = ttm_round_pot(sizeof(struct sg_table));
}
vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
if (unlikely(ret != 0))
return ret;
ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
@@ -340,15 +323,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (ret)
goto out_sg_alloc_fail;
if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
uint64_t over_alloc =
sgl_size * (vsgt->num_pages -
vmw_tt->sgt.orig_nents);
ttm_mem_global_free(glob, over_alloc);
vmw_tt->sg_alloc_size -= over_alloc;
}
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
goto out_map_fail;
@@ -375,7 +349,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
return ret;
}
@@ -401,8 +374,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_ttm_unmap_from_dma(vmw_tt);
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_tt->sg_alloc_size);
break;
default:
break;
@@ -522,7 +493,6 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
unsigned int i;
int ret;
/* TODO: maybe completely drop this ? */
@@ -530,22 +500,7 @@ static int vmw_ttm_populate(struct ttm_device *bdev,
return 0;
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (ret)
return ret;
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
PAGE_SIZE, ctx);
if (ret)
goto error;
}
return 0;
error:
while (i--)
ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
PAGE_SIZE);
ttm_pool_free(&bdev->pool, ttm);
return ret;
}
@@ -554,7 +509,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
unsigned int i;
vmw_ttm_unbind(bdev, ttm);
@@ -565,10 +519,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
vmw_ttm_unmap_dma(vmw_tt);
for (i = 0; i < ttm->num_pages; ++i)
ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
PAGE_SIZE);
ttm_pool_free(&bdev->pool, ttm);
}
...
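vmw_ttm_populate() and vmw_ttm_unpopulate() above lose their per-page accounting loops and now delegate entirely to the TTM pool; the deleted error path had to walk back every page it had already charged before giving up. A self-contained model of that charge-and-roll-back loop; the counters and helpers are illustrative, not TTM's API:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096

static size_t charged;                          /* models the global accounting zone */
static const size_t charge_limit = 8 * PAGE_SZ;

static int charge_page(void)                    /* models ttm_mem_global_alloc_page() */
{
	if (charged + PAGE_SZ > charge_limit)
		return -1;
	charged += PAGE_SZ;
	return 0;
}

static void uncharge_page(void)                 /* models ttm_mem_global_free_page() */
{
	charged -= PAGE_SZ;
}

/* Removed pattern: charge every page after the pool allocation and unwind
 * the partial charges if any single page fails. */
static int populate_old(unsigned int num_pages)
{
	unsigned int i;

	for (i = 0; i < num_pages; ++i) {
		if (charge_page())
			goto error;
	}
	return 0;

error:
	while (i--)
		uncharge_page();
	return -1;
}

int main(void)
{
	printf("10 pages: %d (charged=%zu)\n", populate_old(10), charged);
	printf(" 4 pages: %d (charged=%zu)\n", populate_old(4), charged);
	return 0;
}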
@@ -99,38 +99,3 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
/* struct vmw_validation_mem callback */
static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
{
static struct ttm_operation_ctx ctx = {.interruptible = false,
.no_wait_gpu = false};
struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
}
/* struct vmw_validation_mem callback */
static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
{
struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
* vmw_validation_mem_init_ttm - Interface the validation memory tracker
* to ttm.
* @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
* rather than a struct vmw_validation_mem is to make sure assumption in the
* callbacks that struct vmw_private derives from struct vmw_validation_mem
* holds true.
* @gran: The recommended allocation granularity
*/
void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
{
struct vmw_validation_mem *vvm = &dev_priv->vvm;
vvm->reserve_mem = vmw_vmt_reserve;
vvm->unreserve_mem = vmw_vmt_unreserve;
vvm->gran = gran;
}
@@ -29,6 +29,9 @@
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
@@ -113,13 +116,8 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
struct page *page;
if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
if (ret)
return NULL;
ctx->vm_size_left += ctx->vm->gran;
ctx->total_mem += ctx->vm->gran;
ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -159,7 +157,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
ctx->mem_size_left = 0;
if (ctx->vm && ctx->total_mem) {
ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
ctx->total_mem = 0;
ctx->vm_size_left = 0;
}
...
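With the reserve_mem callback gone, vmw_validation_mem_alloc() simply grows its bookkeeping by the fixed VMWGFX_VALIDATION_MEM_GRAN (16 pages) whenever less than a page of headroom remains, and vmw_validation_mem_free() clears the counters in one step. A user-space sketch of that fixed-granularity bookkeeping; the struct and helpers are stand-ins, not the driver's types:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096
#define GRAN    (16 * PAGE_SZ)          /* mirrors VMWGFX_VALIDATION_MEM_GRAN */

struct val_ctx {
	size_t vm_size_left;            /* headroom left in the current grant */
	size_t total_mem;               /* everything granted so far */
	size_t used;                    /* bytes actually handed out (demo only) */
};

/* Hand out one page of validation memory, growing the bookkeeping in
 * GRAN-sized steps, mirroring the patched vmw_validation_mem_alloc(). */
static void *val_alloc_page(struct val_ctx *ctx)
{
	if (ctx->vm_size_left < PAGE_SZ) {
		ctx->vm_size_left += GRAN;
		ctx->total_mem += GRAN;
	}

	ctx->vm_size_left -= PAGE_SZ;
	ctx->used += PAGE_SZ;
	return calloc(1, PAGE_SZ);
}

static void val_free_all(struct val_ctx *ctx)
{
	/* mirrors vmw_validation_mem_free(): drop the counters in one go */
	ctx->total_mem = 0;
	ctx->vm_size_left = 0;
}

int main(void)
{
	struct val_ctx ctx = {0};
	void *pages[20];
	int i;

	for (i = 0; i < 20; ++i)
		pages[i] = val_alloc_page(&ctx);

	printf("granted=%zu used=%zu left=%zu\n",
	       ctx.total_mem, ctx.used, ctx.vm_size_left);

	for (i = 0; i < 20; ++i)
		free(pages[i]);
	val_free_all(&ctx);
	return 0;
}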
@@ -39,21 +39,6 @@
#define VMW_RES_DIRTY_SET BIT(0)
#define VMW_RES_DIRTY_CLEAR BIT(1)
/**
* struct vmw_validation_mem - Custom interface to provide memory reservations
* for the validation code.
* @reserve_mem: Callback to reserve memory
* @unreserve_mem: Callback to unreserve memory
* @gran: Reservation granularity. Contains a hint how much memory should
* be reserved in each call to @reserve_mem(). A slow implementation may want
* reservation to be done in large batches.
*/
struct vmw_validation_mem {
int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
size_t gran;
};
/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
@@ -129,21 +114,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
return !list_empty(&ctx->bo_list);
}
/**
* vmw_validation_set_val_mem - Register a validation mem object for
* validation memory reservation
* @ctx: The validation context
* @vm: Pointer to a struct vmw_validation_mem
*
* Must be set before the first attempt to allocate validation memory.
*/
static inline void
vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
struct vmw_validation_mem *vm)
{
ctx->vm = vm;
}
/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
@@ -190,22 +160,6 @@ vmw_validation_bo_fence(struct vmw_validation_context *ctx,
(void *) fence);
}
/**
* vmw_validation_context_init - Initialize a validation context
* @ctx: Pointer to the validation context to initialize
*
* This function initializes a validation context with @merge_dups set
* to false
*/
static inline void
vmw_validation_context_init(struct vmw_validation_context *ctx)
{
memset(ctx, 0, sizeof(*ctx));
INIT_LIST_HEAD(&ctx->resource_list);
INIT_LIST_HEAD(&ctx->resource_ctx_list);
INIT_LIST_HEAD(&ctx->bo_list);
}
/**
* vmw_validation_align - Align a validation memory allocation
* @val: The size to be aligned
...