Commit 8038d2a9 authored by Dave Airlie

Merge tag 'vmwgfx-next-4.19-2' of git://people.freedesktop.org/~thomash/linux into drm-next

A series of cleanups / reorganizations and modesetting changes that
mostly target atomic state validation.

[airlied: conflicts with SPDX stuff in amdgpu tree]
Signed-off-by: Dave Airlie <airlied@redhat.com>

Link: https://patchwork.freedesktop.org/patch/msgid/1a88485e-e509-b00e-8485-19194f074115@vmware.com
parents ba7ca97d 812a954b
 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
 	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
@@ -673,8 +673,34 @@ SVGASignedPoint;
  * SVGA_CAP_GBOBJECTS --
  *    Enable guest-backed objects and surfaces.
  *
- * SVGA_CAP_CMD_BUFFERS_3 --
- *    Enable support for command buffers in a mob.
+ * SVGA_CAP_DX --
+ *    Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ *    Enable support for the high priority command queue, and the
+ *    ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ *    Allow ScreenTargets to be defined without regard to the 32-bpp
+ *    bounding-box memory restrictions. ie:
+ *
+ *    The summed memory usage of all screens (assuming they were defined as
+ *    32-bpp) must always be less than the value of the
+ *    SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ *    If this cap is not present, the 32-bpp bounding box around all screens
+ *    must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ *    register.
+ *
+ *    If the cap is present, the bounding box restriction is lifted (and only
+ *    the screen-sum limit applies).
+ *
+ *    (Note that this is a slight lie... there is still a sanity limit on any
+ *     dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
+ *     when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ *     large enough to express any possible topology without holes between
+ *     monitors.)
+ *
  */
 #define SVGA_CAP_NONE 0x00000000
@@ -700,6 +726,7 @@ SVGASignedPoint;
 #define SVGA_CAP_GBOBJECTS 0x08000000
 #define SVGA_CAP_DX 0x10000000
 #define SVGA_CAP_HP_CMD_QUEUE 0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION 0x40000000
 #define SVGA_CAP_CMD_RESERVED 0x80000000
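The capability comment above boils down to two arithmetic checks. The following standalone sketch is editorial, not part of the patch: struct screen and vmw_topology_fits() are invented names, and max_primary_mem stands for a value assumed to have been read from the SVGA_REG_MAX_PRIMARY_MEM register elsewhere.

```c
#include <stdbool.h>
#include <stdint.h>

struct screen {
	uint32_t x, y, w, h;	/* position and size of one screen target */
};

static bool vmw_topology_fits(const struct screen *s, int n,
			      uint64_t max_primary_mem,
			      bool has_no_bb_restriction)
{
	uint64_t sum = 0;		/* summed 32-bpp usage of all screens */
	uint64_t right = 0, bottom = 0;	/* bounding box of the whole topology */
	int i;

	for (i = 0; i < n; i++) {
		sum += (uint64_t)s[i].w * s[i].h * 4;
		if (s[i].x + s[i].w > right)
			right = s[i].x + s[i].w;
		if (s[i].y + s[i].h > bottom)
			bottom = s[i].y + s[i].h;
	}

	/* The screen-sum limit always applies. */
	if (sum > max_primary_mem)
		return false;

	/*
	 * Without SVGA_CAP_NO_BB_RESTRICTION the 32-bpp bounding box around
	 * all screens must additionally fit in the same limit.
	 */
	if (!has_no_bb_restriction && right * bottom * 4 > max_primary_mem)
		return false;

	return true;
}
```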
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "drm/ttm/ttm_object.h"
/**
* struct vmw_user_buffer_object - User-space-visible buffer object
*
* @prime: The prime object providing user visibility.
* @vbo: The struct vmw_buffer_object
*/
struct vmw_user_buffer_object {
struct ttm_prime_object prime;
struct vmw_buffer_object vbo;
};
/**
* vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_buffer_object.
*
* @bo: Pointer to the TTM buffer object.
* Return: Pointer to the struct vmw_buffer_object embedding the
* TTM buffer object.
*/
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
return container_of(bo, struct vmw_buffer_object, base);
}
/**
* vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_user_buffer_object.
*
* @bo: Pointer to the TTM buffer object.
* Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
* object.
*/
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}
/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @placement: The placement to pin it.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
struct ttm_placement *placement,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
if (buf->pin_count > 0)
ret = ttm_bo_mem_compat(placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err:
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
if (buf->pin_count > 0) {
ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
}
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
out_unreserve:
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err:
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_bo_pin_in_vram - Move a buffer to vram.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible)
{
return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
interruptible);
}
/**
* vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to pin.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
placement.busy_placement = &place;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
/*
* Is this buffer already in vram but not at the start of it?
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0 &&
buf->pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}
if (buf->pin_count > 0)
ret = ttm_bo_mem_compat(&placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_bo_unpin - Unpin the given buffer without moving it.
*
* This function takes the reservation_sem in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to unpin.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_unpin(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
vmw_bo_pin_reserved(buf, false);
ttm_bo_unreserve(bo);
err:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
* of a buffer.
*
* @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
* @ptr: SVGAGuestPtr returning the result.
*/
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
if (bo->mem.mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
ptr->offset = bo->offset;
} else {
ptr->gmrId = bo->mem.start;
ptr->offset = 0;
}
}
/**
* vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
*
* @vbo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
lockdep_assert_held(&bo->resv->lock.base);
if (pin) {
if (vbo->pin_count++ > 0)
return;
} else {
WARN_ON(vbo->pin_count <= 0);
if (--vbo->pin_count > 0)
return;
}
pl.fpfn = 0;
pl.lpfn = 0;
pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
if (pin)
pl.flags |= TTM_PL_FLAG_NO_EVICT;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
placement.placement = &pl;
ret = ttm_bo_validate(bo, &placement, &ctx);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
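As the kernel-doc above notes, the buffer must already be reserved when vmw_bo_pin_reserved() is called. A minimal usage sketch of that contract; example_pin_in_place() is a hypothetical caller, not part of the patch:

```c
static int example_pin_in_place(struct vmw_buffer_object *vbo,
				bool interruptible)
{
	struct ttm_buffer_object *bo = &vbo->base;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (ret)
		return ret;

	/* Pin without changing the buffer's current placement. */
	vmw_bo_pin_reserved(vbo, true);
	ttm_bo_unreserve(bo);

	return 0;
}
```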
/**
* vmw_bo_map_and_cache - Map a buffer object and cache the map
*
* @vbo: The buffer object to map
* Return: A kernel virtual address or NULL if mapping failed.
*
* This function maps a buffer object into the kernel address space, or
* returns the virtual kernel address of an already existing map. The virtual
* address remains valid as long as the buffer object is pinned or reserved.
* The cached map is torn down on either
* 1) Buffer object move
* 2) Buffer object swapout
* 3) Buffer object destruction
*
*/
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
struct ttm_buffer_object *bo = &vbo->base;
bool not_used;
void *virtual;
int ret;
virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
if (virtual)
return virtual;
ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
/**
* vmw_bo_unmap - Tear down a cached buffer object map.
*
* @vbo: The buffer object whose map we are tearing down.
*
* This function tears down a cached map set up using
* vmw_bo_map_and_cache().
*/
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
if (vbo->map.bo == NULL)
return;
ttm_bo_kunmap(&vbo->map);
}
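A hedged usage sketch of the map-and-cache contract described above: the caller keeps the buffer reserved (or pinned) while using the mapping and does not call vmw_bo_unmap() itself, since the cached map is torn down on move, swapout or destruction. example_peek() is a made-up name, not part of the patch:

```c
static int example_peek(struct vmw_buffer_object *vbo, u32 *out)
{
	struct ttm_buffer_object *bo = &vbo->base;
	u32 *virtual;
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual) {
		ttm_bo_unreserve(bo);
		return -ENOMEM;
	}

	/* Read the first dword through the cached kernel map. */
	*out = virtual[0];
	ttm_bo_unreserve(bo);
	return 0;
}
```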
/**
* vmw_bo_acc_size - Calculate the pinned memory usage of buffers
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @size: The requested buffer size.
* @user: Whether this is an ordinary dma buffer or a user dma buffer.
*/
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
bool user)
{
static size_t struct_size, user_struct_size;
size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
if (unlikely(struct_size == 0)) {
size_t backend_size = ttm_round_pot(vmw_tt_size);
struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_buffer_object));
user_struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_user_buffer_object));
}
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
page_array_size +=
ttm_round_pot(num_pages * sizeof(dma_addr_t));
return ((user) ? user_struct_size : struct_size) +
page_array_size;
}
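To make the accounting concrete: for a 64 KiB (16-page) request on a typical 64-bit kernel, page_array_size is ttm_round_pot(16 * sizeof(void *)) = 128 bytes; when map_mode is vmw_dma_alloc_coherent another ttm_round_pot(16 * sizeof(dma_addr_t)) = 128 bytes is added, and the total accounted size is that plus struct_size (or user_struct_size), which already folds in the rounded TTM backend size.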
/**
* vmw_bo_bo_free - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
vmw_bo_unmap(vmw_bo);
kfree(vmw_bo);
}
/**
* vmw_user_bo_destroy - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
vmw_bo_unmap(&vmw_user_bo->vbo);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
/**
* vmw_bo_init - Initialize a vmw buffer object
*
* @dev_priv: Pointer to the device private struct
* @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
* @size: Buffer object size in bytes.
* @placement: Initial placement.
* @interruptible: Whether waits should be performed interruptible.
* @bo_free: The buffer object destructor.
* Returns: Zero on success, negative error code on error.
*
* Note that on error, the code will free the buffer object.
*/
int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible,
void (*bo_free)(struct ttm_buffer_object *bo))
{
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
bool user = (bo_free == &vmw_user_bo_destroy);
WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
acc_size = vmw_bo_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, interruptible, acc_size,
NULL, NULL, bo_free);
return ret;
}
/**
* vmw_user_bo_release - TTM reference base object release callback for
* vmw user buffer objects
*
* @p_base: The TTM base object pointer about to be unreferenced.
*
* Clears the TTM base object pointer and drops the reference the
* base object has on the underlying struct vmw_buffer_object.
*/
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *vmw_user_bo;
struct ttm_base_object *base = *p_base;
struct ttm_buffer_object *bo;
*p_base = NULL;
if (unlikely(base == NULL))
return;
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
bo = &vmw_user_bo->vbo.base;
ttm_bo_unref(&bo);
}
/**
* vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
* for vmw user buffer objects
*
* @base: Pointer to the TTM base object
* @ref_type: Reference type of the reference reaching zero.
*
* Called when user-space drops its last synccpu reference on the buffer
* object, either explicitly or as part of a cleanup at file close.
*/
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
enum ttm_ref_type ref_type)
{
struct vmw_user_buffer_object *user_bo;
user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
ttm_bo_synccpu_write_release(&user_bo->vbo.base);
break;
default:
WARN_ONCE(true, "Undefined buffer object reference release.\n");
}
}
/**
* vmw_user_bo_alloc - Allocate a user buffer object
*
* @dev_priv: Pointer to a struct device private.
* @tfile: Pointer to a struct ttm_object_file on which to register the user
* object.
* @size: Size of the buffer object.
* @shareable: Boolean whether the buffer is shareable with other open files.
* @handle: Pointer to where the handle value should be assigned.
* @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
* should be assigned.
* @p_base: Pointer to where a pointer to the TTM base object should be
* placed, or NULL if no such pointer is required.
* Return: Zero on success, negative error code on error.
*/
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_buffer_object **p_vbo,
struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *user_bo;
struct ttm_buffer_object *tmp;
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
if (unlikely(!user_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
(dev_priv->has_mob) ?
&vmw_sys_placement :
&vmw_vram_sys_placement, true,
&vmw_user_bo_destroy);
if (unlikely(ret != 0))
return ret;
tmp = ttm_bo_reference(&user_bo->vbo.base);
ret = ttm_prime_object_init(tfile,
size,
&user_bo->prime,
shareable,
ttm_buffer_type,
&vmw_user_bo_release,
&vmw_user_bo_ref_obj_release);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*p_vbo = &user_bo->vbo;
if (p_base) {
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
*handle = user_bo->prime.base.hash.key;
out_no_base_object:
return ret;
}
/**
* vmw_user_bo_verify_access - verify access permissions on this
* buffer object.
*
* @bo: Pointer to the buffer object being accessed
* @tfile: Identifying the caller.
*/
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile)
{
struct vmw_user_buffer_object *vmw_user_bo;
if (unlikely(bo->destroy != vmw_user_bo_destroy))
return -EPERM;
vmw_user_bo = vmw_user_buffer_object(bo);
/* Check that the caller has opened the object. */
if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
return 0;
DRM_ERROR("Could not grant buffer access.\n");
return -EPERM;
}
/**
* vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
* @user_bo: Pointer to the buffer object being grabbed for CPU access
* @tfile: Identifying the caller.
* @flags: Flags indicating how the grab should be performed.
* Return: Zero on success, Negative error code on error. In particular,
* -EBUSY will be returned if a dontblock operation is requested and the
* buffer object is busy, and -ERESTARTSYS will be returned if a wait is
* interrupted by a signal.
*
* A blocking grab will be automatically released when @tfile is closed.
*/
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
lret = reservation_object_wait_timeout_rcu
(bo->resv, true, true,
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
return lret;
return 0;
}
ret = ttm_bo_synccpu_write_grab
(bo, !!(flags & drm_vmw_synccpu_dontblock));
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->vbo.base);
return ret;
}
/**
* vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
* and unblock command submission on the buffer if blocked.
*
* @handle: Handle identifying the buffer object.
* @tfile: Identifying the caller.
* @flags: Flags indicating the type of release.
*/
static int vmw_user_bo_synccpu_release(uint32_t handle,
struct ttm_object_file *tfile,
uint32_t flags)
{
if (!(flags & drm_vmw_synccpu_allow_cs))
return ttm_ref_object_base_unref(tfile, handle,
TTM_REF_SYNCCPU_WRITE);
return 0;
}
/**
* vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
* functionality.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller.
* Return: Zero on success, negative error code on error.
*
* This function checks the ioctl arguments for validity and calls the
* relevant synccpu functions.
*/
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
struct vmw_buffer_object *vbo;
struct vmw_user_buffer_object *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_base_object *buffer_base;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
|| (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
drm_vmw_synccpu_dontblock |
drm_vmw_synccpu_allow_cs)) != 0) {
DRM_ERROR("Illegal synccpu flags.\n");
return -EINVAL;
}
switch (arg->op) {
case drm_vmw_synccpu_grab:
ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
&buffer_base);
if (unlikely(ret != 0))
return ret;
user_bo = container_of(vbo, struct vmw_user_buffer_object,
vbo);
ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
vmw_bo_unreference(&vbo);
ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
}
break;
case drm_vmw_synccpu_release:
ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
arg->flags);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
}
break;
default:
DRM_ERROR("Invalid synccpu operation.\n");
return -EINVAL;
}
return 0;
}
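From user space the grab/release pair above is reached through the VMW_SYNCCPU ioctl. A hedged sketch of that flow, assuming libdrm's drmCommandWrite() wrapper and the uapi definitions from vmwgfx_drm.h; the header path and any struct fields other than op, flags and handle are assumptions, hence the designated initializers:

```c
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>	/* header location may differ per distribution */

static int example_cpu_access(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg = {
		.op = drm_vmw_synccpu_grab,
		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
		.handle = handle,
	};
	int ret;

	/* Idle the GPU on the buffer and block further command submission. */
	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	if (ret)
		return ret;

	/* ... CPU access through an existing mmap of the buffer ... */

	/* Drop the synccpu reference and unblock command submission. */
	arg.op = drm_vmw_synccpu_release;
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}
```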
/**
* vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
* allocation functionality.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller.
* Return: Zero on success, negative error code on error.
*
* This function checks the ioctl arguments for validity and allocates a
* struct vmw_user_buffer_object bo.
*/
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_alloc_dmabuf_arg *arg =
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_buffer_object *vbo;
uint32_t handle;
int ret;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &vbo,
NULL);
if (unlikely(ret != 0))
goto out_no_bo;
rep->handle = handle;
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
vmw_bo_unreference(&vbo);
out_no_bo:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_bo_unref_ioctl - Generic handle close ioctl.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller.
* Return: Zero on success, negative error code on error.
*
* This function checks the ioctl arguments for validity and closes a
* handle to a TTM base object, optionally freeing the object.
*/
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
arg->handle,
TTM_REF_USAGE);
}
/**
* vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
*
* @tfile: The TTM object file the handle is registered with.
* @handle: The user buffer object handle
* @out: Pointer to a where a pointer to the embedded
* struct vmw_buffer_object should be placed.
* @p_base: Pointer to where a pointer to the TTM base object should be
* placed, or NULL if no such pointer is required.
* Return: Zero on success, Negative error code on error.
*
* Both the output base object pointer and the vmw buffer object pointer
* will be refcounted.
*/
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_buffer_object **out,
struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *vmw_user_bo;
struct ttm_base_object *base;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL)) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -ESRCH;
}
if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
if (p_base)
*p_base = base;
else
ttm_base_object_unref(&base);
*out = &vmw_user_bo->vbo;
return 0;
}
/**
* vmw_user_bo_reference - Open a handle to a vmw user buffer object.
*
* @tfile: The TTM object file to register the handle with.
* @vbo: The embedded vmw buffer object.
* @handle: Pointer to where the new handle should be placed.
* Return: Zero on success, Negative error code on error.
*/
int vmw_user_bo_reference(struct ttm_object_file *tfile,
struct vmw_buffer_object *vbo,
uint32_t *handle)
{
struct vmw_user_buffer_object *user_bo;
if (vbo->base.destroy != vmw_user_bo_destroy)
return -EINVAL;
user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL, false);
}
/**
* vmw_bo_fence_single - Utility function to fence a single TTM buffer
* object without unreserving it.
*
* @bo: Pointer to the struct ttm_buffer_object to fence.
* @fence: Pointer to the fence. If NULL, this function will
* insert a fence into the command stream.
*
* Contrary to the ttm_eu version of this function, it takes only
* a single buffer object instead of a list, and it also doesn't
* unreserve the buffer object, which needs to be done separately.
*/
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_bo_device *bdev = bo->bdev;
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}
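A short sketch of the calling convention spelled out above: the buffer is reserved by the caller, fenced (passing NULL lets the helper insert its own fence), and unreserved separately. example_fence_after_cmd() is a hypothetical caller, not part of the patch:

```c
static int example_fence_after_cmd(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	/* ... emit device commands referencing @vbo here ... */

	/* NULL makes the helper insert a fence into the command stream. */
	vmw_bo_fence_single(bo, NULL);

	/* Contrary to the ttm_eu helpers, unreserving is up to the caller. */
	ttm_bo_unreserve(bo);
	return 0;
}
```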
/**
* vmw_dumb_create - Create a dumb kms buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @args: Pointer to a struct drm_mode_create_dumb structure
* Return: Zero on success, negative error code on failure.
*
* This is a driver callback for the core drm create_dumb functionality.
* Note that this is very similar to the vmw_bo_alloc ioctl, except
* that the arguments have a different format.
*/
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_buffer_object *vbo;
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
&vbo, NULL);
if (unlikely(ret != 0))
goto out_no_bo;
vmw_bo_unreference(&vbo);
out_no_bo:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
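As a concrete instance of the pitch/size computation above: a 1280x720 request at 32 bpp gives pitch = 1280 * ((32 + 7) / 8) = 5120 bytes and size = 5120 * 720 = 3,686,400 bytes, i.e. exactly 900 pages backing the dumb buffer.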
/**
* vmw_dumb_map_offset - Return the address space offset of a dumb buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @handle: Handle identifying the dumb buffer.
* @offset: The address space offset returned.
* Return: Zero on success, negative error code on failure.
*
* This is a driver callback for the core drm dumb_map_offset functionality.
*/
int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_buffer_object *out_buf;
int ret;
ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
if (ret != 0)
return -EINVAL;
*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
vmw_bo_unreference(&out_buf);
return 0;
}
/**
* vmw_dumb_destroy - Destroy a dumb buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @handle: Handle identifying the dumb buffer.
* Return: Zero on success, negative error code on failure.
*
* This is a driver callback for the core drm dumb_destroy functionality.
*/
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle)
{
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
}
/**
* vmw_bo_swap_notify - swapout notify callback.
*
* @bo: The buffer object to be swapped out.
*/
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
if (bo->destroy != vmw_bo_bo_free &&
bo->destroy != vmw_user_bo_destroy)
return;
/* Kill any cached kernel maps before swapout */
vmw_bo_unmap(vmw_buffer_object(bo));
}
/**
* vmw_bo_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
* Detaches cached maps and device bindings that require that the
* buffer doesn't move.
*/
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
struct vmw_buffer_object *vbo;
if (mem == NULL)
return;
/* Make sure @bo is embedded in a struct vmw_buffer_object. */
if (bo->destroy != vmw_bo_bo_free &&
bo->destroy != vmw_user_bo_destroy)
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
/*
* Kill any cached kernel maps before move to or from VRAM.
* With other types of moves, the underlying pages stay the same,
* and the map can be kept.
*/
if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
vmw_bo_unmap(vbo);
/*
* If we're moving a backup MOB out of MOB placement, then make sure we
* read back all resource content first, and unbind the MOB from
* the resource.
*/
if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
@@ -38,7 +38,7 @@ struct vmw_user_context {
 	struct vmw_cmdbuf_res_manager *man;
 	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
 	spinlock_t cotable_lock;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 };
 static void vmw_user_context_free(struct vmw_resource *res);
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
  * specified in the parameter. 0 otherwise.
  */
 int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-			      struct vmw_dma_buffer *mob)
+			      struct vmw_buffer_object *mob)
 {
 	struct vmw_user_context *uctx =
 		container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	if (mob == NULL) {
 		if (uctx->dx_query_mob) {
 			uctx->dx_query_mob->dx_query_ctx = NULL;
-			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			vmw_bo_unreference(&uctx->dx_query_mob);
 			uctx->dx_query_mob = NULL;
 		}
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	mob->dx_query_ctx = ctx_res;
 	if (!uctx->dx_query_mob)
-		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+		uctx->dx_query_mob = vmw_bo_reference(mob);
 	return 0;
 }
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
  *
  * @ctx_res: The context resource
  */
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
 	struct vmw_user_context *uctx =
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
 	mutex_unlock(&dev_priv->binding_mutex);
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
 	}
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	vmw_fence_single_bo(&res->backup->base, fence);
+	vmw_bo_fence_single(&res->backup->base, fence);
 	vmw_fence_obj_unreference(&fence);
 	return 0;
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	struct ttm_operation_ctx ctx = { false, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_cotable *vcotbl = vmw_cotable(res);
-	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct vmw_buffer_object *buf, *old_buf = res->backup;
 	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
 	size_t old_size = res->backup_size;
 	size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	if (!buf)
 		return -ENOMEM;
-	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-			      true, vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			  true, vmw_bo_bo_free);
 	if (ret) {
 		DRM_ERROR("Failed initializing new cotable MOB.\n");
 		return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	/* Let go of the old mob. */
 	list_del(&res->mob_head);
 	list_add_tail(&res->mob_head, &buf->res_list);
-	vmw_dmabuf_unreference(&old_buf);
+	vmw_bo_unreference(&old_buf);
 	res->id = vcotbl->type;
 	return 0;
@@ -491,7 +491,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	ttm_bo_kunmap(&old_map);
 out_wait:
 	ttm_bo_unreserve(bo);
-	vmw_dmabuf_unreference(&buf);
+	vmw_bo_unreference(&buf);
 	return ret;
 }
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
/**
* vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @placement: The placement to pin it.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
struct ttm_placement *placement,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
if (buf->pin_count > 0)
ret = ttm_bo_mem_compat(placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err:
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
if (buf->pin_count > 0) {
ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
}
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
out_unreserve:
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err:
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_dmabuf_pin_in_vram - Move a buffer to vram.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
interruptible);
}
/**
* vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to pin.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
placement.busy_placement = &place;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
/*
* Is this buffer already in vram but not at the start of it?
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0 &&
buf->pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}
if (buf->pin_count > 0)
ret = ttm_bo_mem_compat(&placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_dmabuf_unpin - Unpin the given buffer without moving it.
*
* This function takes the reservation_sem in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to unpin.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
vmw_bo_pin_reserved(buf, false);
ttm_bo_unreserve(bo);
err:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
* of a buffer.
*
* @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
* @ptr: SVGAGuestPtr returning the result.
*/
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
if (bo->mem.mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
ptr->offset = bo->offset;
} else {
ptr->gmrId = bo->mem.start;
ptr->offset = 0;
}
}
/**
* vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
*
* @vbo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
lockdep_assert_held(&bo->resv->lock.base);
if (pin) {
if (vbo->pin_count++ > 0)
return;
} else {
WARN_ON(vbo->pin_count <= 0);
if (--vbo->pin_count > 0)
return;
}
pl.fpfn = 0;
pl.lpfn = 0;
pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
if (pin)
pl.flags |= TTM_PL_FLAG_NO_EVICT;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
placement.placement = &pl;
ret = ttm_bo_validate(bo, &placement, &ctx);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
/*
* vmw_dma_buffer_unmap - Tear down a cached buffer object map.
*
* @vbo: The buffer object whose map we are tearing down.
*
* This function tears down a cached map set up using
* vmw_dma_buffer_map_and_cache().
*/
void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
{
if (vbo->map.bo == NULL)
return;
ttm_bo_kunmap(&vbo->map);
}
/*
* vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
*
* @vbo: The buffer object to map
* Return: A kernel virtual address or NULL if mapping failed.
*
* This function maps a buffer object into the kernel address space, or
* returns the virtual kernel address of an already existing map. The virtual
* address remains valid as long as the buffer object is pinned or reserved.
* The cached map is torn down on either
* 1) Buffer object move
* 2) Buffer object swapout
* 3) Buffer object destruction
*
*/
void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
{
struct ttm_buffer_object *bo = &vbo->base;
bool not_used;
void *virtual;
int ret;
virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
if (virtual)
return virtual;
ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
@@ -153,9 +153,9 @@
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
@@ -219,7 +219,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      vmw_gb_surface_reference_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
-		      vmw_user_dmabuf_synccpu_ioctl,
+		      vmw_user_bo_synccpu_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 		      vmw_extended_context_define_ioctl,
@@ -321,7 +321,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct vmw_dma_buffer *vbo;
+	struct vmw_buffer_object *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
@@ -335,9 +335,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (!vbo)
 		return -ENOMEM;
-	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
 			  &vmw_sys_ne_placement, false,
-			  &vmw_dmabuf_bo_free);
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		return ret;
@@ -358,7 +358,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		vmw_dmabuf_unreference(&vbo);
+		vmw_bo_unreference(&vbo);
 	} else
 		dev_priv->dummy_query_bo = vbo;
@@ -460,7 +460,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
 	BUG_ON(dev_priv->pinned_bo != NULL);
-	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	vmw_bo_unreference(&dev_priv->dummy_query_bo);
 	if (dev_priv->cman)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
@@ -644,6 +644,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
+	mutex_init(&dev_priv->requested_layout_mutex);
 	mutex_init(&dev_priv->global_kms_state_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
@@ -86,7 +86,7 @@ struct vmw_fpriv {
 	bool gb_aware;
 };
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
 	s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
 	unsigned long backup_size;
 	bool res_dirty; /* Protected by backup buffer reserved */
 	bool backup_dirty; /* Protected by backup buffer reserved */
-	struct vmw_dma_buffer *backup;
+	struct vmw_buffer_object *backup;
 	unsigned long backup_offset;
 	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
@@ -304,7 +304,7 @@ struct vmw_sw_context{
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	struct list_head ctx_resource_list; /* For contexts and cotables */
-	struct vmw_dma_buffer *cur_query_bo;
+	struct vmw_buffer_object *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +315,7 @@ struct vmw_sw_context{
 	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
 	struct vmw_resource_val_node *dx_ctx_node;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_cmdbuf_res_manager *man;
 };
@@ -411,6 +411,15 @@ struct vmw_private {
 	uint32_t num_displays;
+	/*
+	 * Currently requested_layout_mutex is used to protect the gui
+	 * positioning state in the display unit. With that use case, this
+	 * mutex is currently only taken during the layout ioctl and atomic
+	 * check_modeset. Other display unit state can be protected with
+	 * this mutex, but that needs careful consideration.
+	 */
+	struct mutex requested_layout_mutex;
 	/*
 	 * Framebuffer info.
 	 */
@@ -513,8 +522,8 @@ struct vmw_private {
 	 * are protected by the cmdbuf mutex.
 	 */
-	struct vmw_dma_buffer *dummy_query_bo;
-	struct vmw_dma_buffer *pinned_bo;
+	struct vmw_buffer_object *dummy_query_bo;
+	struct vmw_buffer_object *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -623,43 +632,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
-				  struct vmw_dma_buffer **out_buf);
+				  struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
 	struct vmw_private *dev_priv,
 	struct ttm_object_file *tfile,
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
-			   struct vmw_dma_buffer *vmw_bo,
-			   size_t size, struct ttm_placement *placement,
-			   bool interuptable,
-			   void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
-					 struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-				 struct ttm_object_file *tfile,
-				 uint32_t size,
-				 bool shareable,
-				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf,
-				 struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-				     struct vmw_dma_buffer *dma_buf,
-				     uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-					 struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-					 uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out,
-				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +649,70 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
 				   bool switch_backup,
-				   struct vmw_dma_buffer *new_backup,
+				   struct vmw_buffer_object *new_backup,
 				   unsigned long new_backup_offset);
-extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
-				     struct ttm_mem_reg *mem);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem);
-extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
-extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
-				struct vmw_fence_obj *fence);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
/** /**
* DMA buffer helper routines - vmwgfx_dmabuf.c * Buffer object helper functions - vmwgfx_bo.c
*/ */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv, extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo, struct vmw_buffer_object *bo,
struct ttm_placement *placement, struct ttm_placement *placement,
bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
bool interruptible); bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *bo,
bool interruptible); bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr); SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin); extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo); extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo); extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interuptable,
void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_buffer_object **p_dma_buf,
struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
struct vmw_buffer_object *dma_buf,
uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_buffer_object **out,
struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
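As a hedged usage sketch of the renamed helpers above (mirroring the vmw_fb_create_bo() hunk later in this patch), a caller typically allocates the object itself and lets vmw_bo_init() clean up on failure:

/* Illustrative sketch: create a system-placed vmw_buffer_object. */
static int vmw_example_alloc_bo(struct vmw_private *dev_priv, size_t size,
				struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	vmw_bo = kzalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vmw_bo, size, &vmw_sys_placement,
			  false, &vmw_bo_bo_free);
	if (ret)
		return ret;	/* vmw_bo_init() frees the object on failure. */

	*out = vmw_bo;
	return 0;
}

Callers that are done with the object drop their reference with vmw_bo_unreference(&vmw_bo).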
/** /**
* Misc Ioctl functionality - vmwgfx_ioctl.c * Misc Ioctl functionality - vmwgfx_ioctl.c
...@@ -758,7 +764,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv); ...@@ -758,7 +764,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/** /**
* TTM buffer object driver - vmwgfx_buffer.c * TTM buffer object driver - vmwgfx_ttm_buffer.c
*/ */
extern const size_t vmw_tt_size; extern const size_t vmw_tt_size;
...@@ -1041,8 +1047,8 @@ vmw_context_binding_state(struct vmw_resource *ctx); ...@@ -1041,8 +1047,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback); bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_dma_buffer *mob); struct vmw_buffer_object *mob);
extern struct vmw_dma_buffer * extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res); vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
...@@ -1224,6 +1230,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, ...@@ -1224,6 +1230,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
u32 w, u32 h, u32 w, u32 h,
struct vmw_diff_cpy *diff); struct vmw_diff_cpy *diff);
/* Host messaging - vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
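A minimal, hedged example of the host-messaging helpers declared above (the message text is arbitrary and only for illustration):

/* Illustrative only: forward a short string to the host log. */
static void vmw_example_host_log(void)
{
	int ret = vmw_host_log("vmwgfx: example log message");

	if (ret)
		DRM_WARN("Failed to send log message to host: %d\n", ret);
}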
/** /**
* Inline helper functions * Inline helper functions
*/ */
...@@ -1243,9 +1254,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) ...@@ -1243,9 +1254,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
return srf; return srf;
} }
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{ {
struct vmw_dma_buffer *tmp_buf = *buf; struct vmw_buffer_object *tmp_buf = *buf;
*buf = NULL; *buf = NULL;
if (tmp_buf != NULL) { if (tmp_buf != NULL) {
...@@ -1255,7 +1266,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) ...@@ -1255,7 +1266,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
} }
} }
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{ {
if (ttm_bo_reference(&buf->base)) if (ttm_bo_reference(&buf->base))
return buf; return buf;
...@@ -1302,10 +1314,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr) ...@@ -1302,10 +1314,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
{ {
WRITE_ONCE(*addr, value); WRITE_ONCE(*addr, value);
} }
/**
* Add vmw_msg module function
*/
extern int vmw_host_log(const char *log);
#endif #endif
...@@ -92,7 +92,7 @@ struct vmw_resource_val_node { ...@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
struct list_head head; struct list_head head;
struct drm_hash_item hash; struct drm_hash_item hash;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_dma_buffer *new_backup; struct vmw_buffer_object *new_backup;
struct vmw_ctx_binding_state *staged_bindings; struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset; unsigned long new_backup_offset;
u32 first_usage : 1; u32 first_usage : 1;
...@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, ...@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGAMobId *id, SVGAMobId *id,
struct vmw_dma_buffer **vmw_bo_p); struct vmw_buffer_object **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo, struct vmw_buffer_object *vbo,
bool validate_as_mob, bool validate_as_mob,
uint32_t *p_val_node); uint32_t *p_val_node);
/** /**
...@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context, ...@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
} }
vmw_resource_unreserve(res, switch_backup, val->new_backup, vmw_resource_unreserve(res, switch_backup, val->new_backup,
val->new_backup_offset); val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup); vmw_bo_unreference(&val->new_backup);
} }
} }
...@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, ...@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
} }
if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
struct vmw_dma_buffer *dx_query_mob; struct vmw_buffer_object *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx); dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob) if (dx_query_mob)
...@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, ...@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
* submission is reached. * submission is reached.
*/ */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo, struct vmw_buffer_object *vbo,
bool validate_as_mob, bool validate_as_mob,
uint32_t *p_val_node) uint32_t *p_val_node)
{ {
...@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) ...@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret; return ret;
if (res->backup) { if (res->backup) {
struct vmw_dma_buffer *vbo = res->backup; struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list ret = vmw_bo_to_validate_list
(sw_context, vbo, (sw_context, vbo,
...@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) ...@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
} }
if (sw_context->dx_query_mob) { if (sw_context->dx_query_mob) {
struct vmw_dma_buffer *expected_dx_query_mob; struct vmw_buffer_object *expected_dx_query_mob;
expected_dx_query_mob = expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx); vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
...@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) ...@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
list_for_each_entry(val, &sw_context->resource_list, head) { list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res; struct vmw_resource *res = val->res;
struct vmw_dma_buffer *backup = res->backup; struct vmw_buffer_object *backup = res->backup;
ret = vmw_resource_validate(res); ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
...@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) ...@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
/* Check if the resource switched backup buffer */ /* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) { if (backup && res->backup && (backup != res->backup)) {
struct vmw_dma_buffer *vbo = res->backup; struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list ret = vmw_bo_to_validate_list
(sw_context, vbo, (sw_context, vbo,
...@@ -821,7 +821,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, ...@@ -821,7 +821,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{ {
struct vmw_private *dev_priv = ctx_res->dev_priv; struct vmw_private *dev_priv = ctx_res->dev_priv;
struct vmw_dma_buffer *dx_query_mob; struct vmw_buffer_object *dx_query_mob;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdDXBindAllQuery body; SVGA3dCmdDXBindAllQuery body;
...@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, ...@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* command batch. * command batch.
*/ */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *new_query_bo, struct vmw_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context) struct vmw_sw_context *sw_context)
{ {
struct vmw_res_cache_entry *ctx_entry = struct vmw_res_cache_entry *ctx_entry =
...@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ...@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo != sw_context->cur_query_bo) { if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) { if (dev_priv->pinned_bo) {
vmw_bo_pin_reserved(dev_priv->pinned_bo, false); vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
vmw_dmabuf_unreference(&dev_priv->pinned_bo); vmw_bo_unreference(&dev_priv->pinned_bo);
} }
if (!sw_context->needs_post_query_barrier) { if (!sw_context->needs_post_query_barrier) {
...@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ...@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
dev_priv->query_cid = sw_context->last_query_ctx->id; dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true; dev_priv->query_cid_valid = true;
dev_priv->pinned_bo = dev_priv->pinned_bo =
vmw_dmabuf_reference(sw_context->cur_query_bo); vmw_bo_reference(sw_context->cur_query_bo);
} }
} }
} }
...@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ...@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGAMobId *id, SVGAMobId *id,
struct vmw_dma_buffer **vmw_bo_p) struct vmw_buffer_object **vmw_bo_p)
{ {
struct vmw_dma_buffer *vmw_bo = NULL; struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = *id; uint32_t handle = *id;
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n"); DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL; ret = -EINVAL;
...@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
return 0; return 0;
out_no_reloc: out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL; *vmw_bo_p = NULL;
return ret; return ret;
} }
...@@ -1343,15 +1342,14 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1343,15 +1342,14 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr, SVGAGuestPtr *ptr,
struct vmw_dma_buffer **vmw_bo_p) struct vmw_buffer_object **vmw_bo_p)
{ {
struct vmw_dma_buffer *vmw_bo = NULL; struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = ptr->gmrId; uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n"); DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL; ret = -EINVAL;
...@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ...@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0; return 0;
out_no_reloc: out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL; *vmw_bo_p = NULL;
return ret; return ret;
} }
...@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, ...@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
SVGA3dCmdDXBindQuery q; SVGA3dCmdDXBindQuery q;
} *cmd; } *cmd;
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
int ret; int ret;
...@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, ...@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
sw_context->dx_query_mob = vmw_bo; sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res; sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, ...@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd { struct vmw_query_cmd {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdEndGBQuery q; SVGA3dCmdEndGBQuery q;
...@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, ...@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, ...@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd { struct vmw_query_cmd {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdEndQuery q; SVGA3dCmdEndQuery q;
...@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, ...@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, ...@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd { struct vmw_query_cmd {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery q; SVGA3dCmdWaitForGBQuery q;
...@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, ...@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
return 0; return 0;
} }
...@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, ...@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd { struct vmw_query_cmd {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery q; SVGA3dCmdWaitForQuery q;
...@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, ...@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
return 0; return 0;
} }
...@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
{ {
struct vmw_dma_buffer *vmw_bo = NULL; struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_surface *srf = NULL; struct vmw_surface *srf = NULL;
struct vmw_dma_cmd { struct vmw_dma_cmd {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
...@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
header); header);
out_no_surface: out_no_surface:
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, ...@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
void *buf) void *buf)
{ {
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
int ret; int ret;
struct { struct {
...@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, ...@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_dmabuf_unreference(&vmw_bo); vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, ...@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id, uint32_t *buf_id,
unsigned long backup_offset) unsigned long backup_offset)
{ {
struct vmw_dma_buffer *dma_buf; struct vmw_buffer_object *dma_buf;
int ret; int ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
...@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, ...@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
if (val_node->first_usage) if (val_node->first_usage)
val_node->no_buffer_needed = true; val_node->no_buffer_needed = true;
vmw_dmabuf_unreference(&val_node->new_backup); vmw_bo_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf; val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset; val_node->new_backup_offset = backup_offset;
...@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv, ...@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
bool interruptible, bool interruptible,
bool validate_as_mob) bool validate_as_mob)
{ {
struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer, struct vmw_buffer_object *vbo =
base); container_of(bo, struct vmw_buffer_object, base);
struct ttm_operation_ctx ctx = { interruptible, true }; struct ttm_operation_ctx ctx = { interruptible, true };
int ret; int ret;
...@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ...@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo); ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo); ttm_bo_unref(&pinned_val.bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo); vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock: out_unlock:
return; return;
...@@ -4432,7 +4430,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ...@@ -4432,7 +4430,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
out_no_reserve: out_no_reserve:
ttm_bo_unref(&query_val.bo); ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo); ttm_bo_unref(&pinned_val.bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo); vmw_bo_unreference(&dev_priv->pinned_bo);
} }
/** /**
......
...@@ -42,7 +42,7 @@ struct vmw_fb_par { ...@@ -42,7 +42,7 @@ struct vmw_fb_par {
void *vmalloc; void *vmalloc;
struct mutex bo_mutex; struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
unsigned bo_size; unsigned bo_size;
struct drm_framebuffer *set_fb; struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode; struct drm_display_mode *set_mode;
...@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work) ...@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
struct drm_clip_rect clip; struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb; struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr; u8 *src_ptr, *dst_ptr;
struct vmw_dma_buffer *vbo = par->vmw_bo; struct vmw_buffer_object *vbo = par->vmw_bo;
void *virtual; void *virtual;
if (!READ_ONCE(par->dirty.active)) if (!READ_ONCE(par->dirty.active))
...@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work) ...@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
(void) ttm_read_lock(&vmw_priv->reservation_sem, false); (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
(void) ttm_bo_reserve(&vbo->base, false, false, NULL); (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
virtual = vmw_dma_buffer_map_and_cache(vbo); virtual = vmw_bo_map_and_cache(vbo);
if (!virtual) if (!virtual)
goto out_unreserve; goto out_unreserve;
...@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image) ...@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
*/ */
static int vmw_fb_create_bo(struct vmw_private *vmw_priv, static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out) size_t size, struct vmw_buffer_object **out)
{ {
struct vmw_dma_buffer *vmw_bo; struct vmw_buffer_object *vmw_bo;
int ret; int ret;
(void) ttm_write_lock(&vmw_priv->reservation_sem, false); (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
...@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, ...@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
goto err_unlock; goto err_unlock;
} }
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, ret = vmw_bo_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement, &vmw_sys_placement,
false, false,
&vmw_dmabuf_bo_free); &vmw_bo_bo_free);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */ goto err_unlock; /* init frees the buffer on failure */
...@@ -491,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par, ...@@ -491,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
} }
if (par->vmw_bo && detach_bo && unref_bo) if (par->vmw_bo && detach_bo && unref_bo)
vmw_dmabuf_unreference(&par->vmw_bo); vmw_bo_unreference(&par->vmw_bo);
return 0; return 0;
} }
......
...@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) ...@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
struct vmw_private *dev_priv = fman->dev_priv; struct vmw_private *dev_priv = fman->dev_priv;
struct vmwgfx_wait_cb cb; struct vmwgfx_wait_cb cb;
long ret = timeout; long ret = timeout;
unsigned long irq_flags;
if (likely(vmw_fence_obj_signaled(fence))) if (likely(vmw_fence_obj_signaled(fence)))
return timeout; return timeout;
...@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) ...@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv); vmw_seqno_waiter_add(dev_priv);
spin_lock_irqsave(f->lock, irq_flags); spin_lock(f->lock);
if (intr && signal_pending(current)) { if (intr && signal_pending(current)) {
ret = -ERESTARTSYS; ret = -ERESTARTSYS;
...@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) ...@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
cb.task = current; cb.task = current;
list_add(&cb.base.node, &f->cb_list); list_add(&cb.base.node, &f->cb_list);
while (ret > 0) { for (;;) {
__vmw_fences_update(fman); __vmw_fences_update(fman);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
/*
* We can use the barrier free __set_current_state() since
* DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
* fence spinlock.
*/
if (intr) if (intr)
__set_current_state(TASK_INTERRUPTIBLE); __set_current_state(TASK_INTERRUPTIBLE);
else else
__set_current_state(TASK_UNINTERRUPTIBLE); __set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(f->lock, irq_flags);
ret = schedule_timeout(ret); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
if (ret == 0 && timeout > 0)
ret = 1;
break;
}
spin_lock_irqsave(f->lock, irq_flags); if (intr && signal_pending(current)) {
if (ret > 0 && intr && signal_pending(current))
ret = -ERESTARTSYS; ret = -ERESTARTSYS;
} break;
}
if (ret == 0)
break;
spin_unlock(f->lock);
ret = schedule_timeout(ret);
spin_lock(f->lock);
}
__set_current_state(TASK_RUNNING);
if (!list_empty(&cb.base.node)) if (!list_empty(&cb.base.node))
list_del(&cb.base.node); list_del(&cb.base.node);
__set_current_state(TASK_RUNNING);
out: out:
spin_unlock_irqrestore(f->lock, irq_flags); spin_unlock(f->lock);
vmw_seqno_waiter_remove(dev_priv); vmw_seqno_waiter_remove(dev_priv);
......
...@@ -377,8 +377,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, ...@@ -377,8 +377,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
} }
vfb = vmw_framebuffer_to_vfb(fb); vfb = vmw_framebuffer_to_vfb(fb);
if (!vfb->dmabuf) { if (!vfb->bo) {
DRM_ERROR("Framebuffer not dmabuf backed.\n"); DRM_ERROR("Framebuffer not buffer backed.\n");
ret = -EINVAL; ret = -EINVAL;
goto out_no_ttm_lock; goto out_no_ttm_lock;
} }
......
...@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv, ...@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0; return 0;
} }
static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf, struct vmw_buffer_object *bo,
u32 width, u32 height, u32 width, u32 height,
u32 hotspotX, u32 hotspotY) u32 hotspotX, u32 hotspotY)
{ {
struct ttm_bo_kmap_obj map; struct ttm_bo_kmap_obj map;
unsigned long kmap_offset; unsigned long kmap_offset;
...@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, ...@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0; kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL); ret = ttm_bo_reserve(&bo->base, true, false, NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n"); DRM_ERROR("reserve failed\n");
return -EINVAL; return -EINVAL;
} }
ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map); ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto err_unreserve; goto err_unreserve;
...@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, ...@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
ttm_bo_kunmap(&map); ttm_bo_kunmap(&map);
err_unreserve: err_unreserve:
ttm_bo_unreserve(&dmabuf->base); ttm_bo_unreserve(&bo->base);
return ret; return ret;
} }
...@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, ...@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
if (vps->surf) if (vps->surf)
vmw_surface_unreference(&vps->surf); vmw_surface_unreference(&vps->surf);
if (vps->dmabuf) if (vps->bo)
vmw_dmabuf_unreference(&vps->dmabuf); vmw_bo_unreference(&vps->bo);
if (fb) { if (fb) {
if (vmw_framebuffer_to_vfb(fb)->dmabuf) { if (vmw_framebuffer_to_vfb(fb)->bo) {
vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer; vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
vmw_dmabuf_reference(vps->dmabuf); vmw_bo_reference(vps->bo);
} else { } else {
vps->surf = vmw_framebuffer_to_vfbs(fb)->surface; vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
vmw_surface_reference(vps->surf); vmw_surface_reference(vps->surf);
...@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, ...@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
} }
du->cursor_surface = vps->surf; du->cursor_surface = vps->surf;
du->cursor_dmabuf = vps->dmabuf; du->cursor_bo = vps->bo;
if (vps->surf) { if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age; du->cursor_age = du->cursor_surface->snooper.age;
...@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, ...@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vps->surf->snooper.image, vps->surf->snooper.image,
64, 64, hotspot_x, 64, 64, hotspot_x,
hotspot_y); hotspot_y);
} else if (vps->dmabuf) { } else if (vps->bo) {
ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf, ret = vmw_cursor_update_bo(dev_priv, vps->bo,
plane->state->crtc_w, plane->state->crtc_w,
plane->state->crtc_h, plane->state->crtc_h,
hotspot_x, hotspot_y); hotspot_x, hotspot_y);
} else { } else {
vmw_cursor_update_position(dev_priv, false, 0, 0); vmw_cursor_update_position(dev_priv, false, 0, 0);
return; return;
...@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, ...@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
ret = -EINVAL; ret = -EINVAL;
} }
if (!vmw_framebuffer_to_vfb(fb)->dmabuf) if (!vmw_framebuffer_to_vfb(fb)->bo)
surface = vmw_framebuffer_to_vfbs(fb)->surface; surface = vmw_framebuffer_to_vfbs(fb)->surface;
if (surface && !surface->snooper.image) { if (surface && !surface->snooper.image) {
...@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) ...@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
if (vps->surf) if (vps->surf)
(void) vmw_surface_reference(vps->surf); (void) vmw_surface_reference(vps->surf);
if (vps->dmabuf) if (vps->bo)
(void) vmw_dmabuf_reference(vps->dmabuf); (void) vmw_bo_reference(vps->bo);
state = &vps->base; state = &vps->base;
...@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, ...@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
if (vps->surf) if (vps->surf)
vmw_surface_unreference(&vps->surf); vmw_surface_unreference(&vps->surf);
if (vps->dmabuf) if (vps->bo)
vmw_dmabuf_unreference(&vps->dmabuf); vmw_bo_unreference(&vps->bo);
drm_atomic_helper_plane_destroy_state(plane, state); drm_atomic_helper_plane_destroy_state(plane, state);
} }
...@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, ...@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
/** /**
* vmw_kms_readback - Perform a readback from the screen system to * vmw_kms_readback - Perform a readback from the screen system to
* a dma-buffer backed framebuffer. * a buffer-object backed framebuffer.
* *
* @dev_priv: Pointer to the device private structure. * @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller. * @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL. * Must be set to NULL if @user_fence_rep is NULL.
* @vfb: Pointer to the dma-buffer backed framebuffer. * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information. * @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL. * Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects. * @vclips: Array of clip rects.
...@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, ...@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer **out, struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2 const struct drm_mode_fb_cmd2
*mode_cmd, *mode_cmd,
bool is_dmabuf_proxy) bool is_bo_proxy)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
...@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, ...@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface); vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handles[0]; vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_dmabuf_proxy = is_dmabuf_proxy; vfbs->is_bo_proxy = is_bo_proxy;
*out = &vfbs->base; *out = &vfbs->base;
...@@ -1038,30 +1038,30 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, ...@@ -1038,30 +1038,30 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
} }
/* /*
* Dmabuf framebuffer code * Buffer-object framebuffer code
*/ */
static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{ {
struct vmw_framebuffer_dmabuf *vfbd = struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer); vmw_framebuffer_to_vfbd(framebuffer);
drm_framebuffer_cleanup(framebuffer); drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer); vmw_bo_unreference(&vfbd->buffer);
if (vfbd->base.user_obj) if (vfbd->base.user_obj)
ttm_base_object_unref(&vfbd->base.user_obj); ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd); kfree(vfbd);
} }
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv, struct drm_file *file_priv,
unsigned flags, unsigned color, unsigned int flags, unsigned int color,
struct drm_clip_rect *clips, struct drm_clip_rect *clips,
unsigned num_clips) unsigned int num_clips)
{ {
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_framebuffer_dmabuf *vfbd = struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer); vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect; struct drm_clip_rect norect;
int ret, increment = 1; int ret, increment = 1;
...@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, ...@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
true, true, NULL); true, true, NULL);
break; break;
case vmw_du_screen_object: case vmw_du_screen_object:
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base, ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
clips, NULL, num_clips, clips, NULL, num_clips,
increment, true, NULL, NULL); increment, true, NULL, NULL);
break; break;
case vmw_du_legacy: case vmw_du_legacy:
ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0, ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
clips, num_clips, increment); clips, num_clips, increment);
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
...@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, ...@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
return ret; return ret;
} }
static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.destroy = vmw_framebuffer_dmabuf_destroy, .destroy = vmw_framebuffer_bo_destroy,
.dirty = vmw_framebuffer_dmabuf_dirty, .dirty = vmw_framebuffer_bo_dirty,
}; };
/** /**
 * Pin the dmabuffer in a location suitable for access by the  * Pin the buffer object in a location suitable for access by the
* display system. * display system.
*/ */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{ {
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_dma_buffer *buf; struct vmw_buffer_object *buf;
struct ttm_placement *placement; struct ttm_placement *placement;
int ret; int ret;
buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (!buf) if (!buf)
...@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) ...@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
switch (dev_priv->active_display_unit) { switch (dev_priv->active_display_unit) {
case vmw_du_legacy: case vmw_du_legacy:
vmw_overlay_pause_all(dev_priv); vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false); ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv); vmw_overlay_resume_all(dev_priv);
break; break;
case vmw_du_screen_object: case vmw_du_screen_object:
case vmw_du_screen_target: case vmw_du_screen_target:
if (vfb->dmabuf) { if (vfb->bo) {
if (dev_priv->capabilities & SVGA_CAP_3D) { if (dev_priv->capabilities & SVGA_CAP_3D) {
/* /*
* Use surface DMA to get content to * Use surface DMA to get content to
...@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) ...@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
placement = &vmw_mob_placement; placement = &vmw_mob_placement;
} }
return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement, return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
false);
default: default:
return -EINVAL; return -EINVAL;
} }
...@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) ...@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{ {
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_dma_buffer *buf; struct vmw_buffer_object *buf;
buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (WARN_ON(!buf)) if (WARN_ON(!buf))
return 0; return 0;
return vmw_dmabuf_unpin(dev_priv, buf, false); return vmw_bo_unpin(dev_priv, buf, false);
} }
/** /**
* vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf * vmw_create_bo_proxy - create a proxy surface for the buffer object
* *
* @dev: DRM device * @dev: DRM device
* @mode_cmd: parameters for the new surface * @mode_cmd: parameters for the new surface
* @dmabuf_mob: MOB backing the DMA buf * @bo_mob: MOB backing the buffer object
* @srf_out: newly created surface * @srf_out: newly created surface
* *
* When the content FB is a DMA buf, we create a surface as a proxy to the * When the content FB is a buffer object, we create a surface as a proxy to the
* same buffer. This way we can do a surface copy rather than a surface DMA. * same buffer. This way we can do a surface copy rather than a surface DMA.
* This is a more efficient approach * This is a more efficient approach
* *
* RETURNS: * RETURNS:
* 0 on success, error code otherwise * 0 on success, error code otherwise
*/ */
static int vmw_create_dmabuf_proxy(struct drm_device *dev, static int vmw_create_bo_proxy(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_mode_fb_cmd2 *mode_cmd,
struct vmw_dma_buffer *dmabuf_mob, struct vmw_buffer_object *bo_mob,
struct vmw_surface **srf_out) struct vmw_surface **srf_out)
{ {
uint32_t format; uint32_t format;
struct drm_vmw_size content_base_size = {0}; struct drm_vmw_size content_base_size = {0};
...@@ -1258,8 +1257,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, ...@@ -1258,8 +1257,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */ /* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex); mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true); (void) vmw_resource_reserve(res, false, true);
vmw_dmabuf_unreference(&res->backup); vmw_bo_unreference(&res->backup);
res->backup = vmw_dmabuf_reference(dmabuf_mob); res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0; res->backup_offset = 0;
vmw_resource_unreserve(res, false, NULL, 0); vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex); mutex_unlock(&res->dev_priv->cmdbuf_mutex);
...@@ -1269,21 +1268,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, ...@@ -1269,21 +1268,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf, struct vmw_buffer_object *bo,
struct vmw_framebuffer **out, struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2 const struct drm_mode_fb_cmd2
*mode_cmd) *mode_cmd)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd; struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size; unsigned int requested_size;
struct drm_format_name_buf format_name; struct drm_format_name_buf format_name;
int ret; int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0]; requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small " DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n"); "for requested mode.\n");
return -EINVAL; return -EINVAL;
...@@ -1312,20 +1311,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, ...@@ -1312,20 +1311,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
} }
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.dmabuf = true; vfbd->base.bo = true;
vfbd->buffer = vmw_dmabuf_reference(dmabuf); vfbd->buffer = vmw_bo_reference(bo);
vfbd->base.user_handle = mode_cmd->handles[0]; vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base; *out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base, ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_dmabuf_funcs); &vmw_framebuffer_bo_funcs);
if (ret) if (ret)
goto out_err2; goto out_err2;
return 0; return 0;
out_err2: out_err2:
vmw_dmabuf_unreference(&dmabuf); vmw_bo_unreference(&bo);
kfree(vfbd); kfree(vfbd);
out_err1: out_err1:
return ret; return ret;
...@@ -1354,57 +1353,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height) ...@@ -1354,57 +1353,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* vmw_kms_new_framebuffer - Create a new framebuffer. * vmw_kms_new_framebuffer - Create a new framebuffer.
* *
* @dev_priv: Pointer to device private struct. * @dev_priv: Pointer to device private struct.
* @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around. * @bo: Pointer to buffer object to wrap the kms framebuffer around.
* Either @dmabuf or @surface must be NULL. * Either @bo or @surface must be NULL.
* @surface: Pointer to a surface to wrap the kms framebuffer around. * @surface: Pointer to a surface to wrap the kms framebuffer around.
* Either @dmabuf or @surface must be NULL. * Either @bo or @surface must be NULL.
* @only_2d: No presents will occur to this dma buffer based framebuffer. This * @only_2d: No presents will occur to this buffer object based framebuffer.
* Helps the code to do some important optimizations. * This helps the code to do some important optimizations.
* @mode_cmd: Frame-buffer metadata. * @mode_cmd: Frame-buffer metadata.
*/ */
struct vmw_framebuffer * struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv, vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf, struct vmw_buffer_object *bo,
struct vmw_surface *surface, struct vmw_surface *surface,
bool only_2d, bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd) const struct drm_mode_fb_cmd2 *mode_cmd)
{ {
struct vmw_framebuffer *vfb = NULL; struct vmw_framebuffer *vfb = NULL;
bool is_dmabuf_proxy = false; bool is_bo_proxy = false;
int ret; int ret;
/* /*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
* therefore, wrap the DMA buf in a surface so we can use the * therefore, wrap the buffer object in a surface so we can use the
* SurfaceCopy command. * SurfaceCopy command.
*/ */
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
dmabuf && only_2d && bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */ mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) { dev_priv->active_display_unit == vmw_du_screen_target) {
ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
dmabuf, &surface); bo, &surface);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
is_dmabuf_proxy = true; is_bo_proxy = true;
} }
	/* Create the new framebuffer depending on what we have */	/* Create the new framebuffer depending on what we have */
if (surface) { if (surface) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd, mode_cmd,
is_dmabuf_proxy); is_bo_proxy);
/* /*
* vmw_create_dmabuf_proxy() adds a reference that is no longer * vmw_create_bo_proxy() adds a reference that is no longer
* needed * needed
*/ */
if (is_dmabuf_proxy) if (is_bo_proxy)
vmw_surface_unreference(&surface); vmw_surface_unreference(&surface);
} else if (dmabuf) { } else if (bo) {
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb, ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
mode_cmd); mode_cmd);
} else { } else {
BUG(); BUG();
} }
...@@ -1430,23 +1429,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1430,23 +1429,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL; struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL; struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL; struct vmw_buffer_object *bo = NULL;
struct ttm_base_object *user_obj; struct ttm_base_object *user_obj;
int ret; int ret;
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
* requested framebuffer.
*/
if (!vmw_kms_validate_mode_vram(dev_priv,
mode_cmd->pitches[0],
mode_cmd->height)) {
DRM_ERROR("Requested mode exceed bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
/* /*
* Take a reference on the user object of the resource * Take a reference on the user object of the resource
* backing the kms fb. This ensures that user-space handle * backing the kms fb. This ensures that user-space handle
...@@ -1466,7 +1452,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1466,7 +1452,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* End conditioned code. * End conditioned code.
*/ */
/* returns either a dmabuf or surface */ /* returns either a bo or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile, ret = vmw_user_lookup_handle(dev_priv, tfile,
mode_cmd->handles[0], mode_cmd->handles[0],
&surface, &bo); &surface, &bo);
...@@ -1494,7 +1480,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1494,7 +1480,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out: err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */ /* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo) if (bo)
vmw_dmabuf_unreference(&bo); vmw_bo_unreference(&bo);
if (surface) if (surface)
vmw_surface_unreference(&surface); vmw_surface_unreference(&surface);
...@@ -1508,7 +1494,168 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1508,7 +1494,168 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
return &vfb->base; return &vfb->base;
} }
/**
* vmw_kms_check_display_memory - Validates display memory required for a
* topology
* @dev: DRM device
* @num_rects: number of drm_rect in rects
* @rects: array of drm_rect representing the topology to validate indexed by
* crtc index.
*
* Returns:
* 0 on success, otherwise a negative error code
*/
static int vmw_kms_check_display_memory(struct drm_device *dev,
uint32_t num_rects,
struct drm_rect *rects)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_rect bounding_box = {0};
u64 total_pixels = 0, pixel_mem, bb_mem;
int i;
for (i = 0; i < num_rects; i++) {
/*
* Currently this check limits the topology to the maximum
* texture/screentarget size. This should change in the future once
* user-space supports multiple framebuffers per topology.
*/
if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
rects[i].x2 > mode_config->max_width ||
rects[i].y2 > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
return -EINVAL;
}
/* Bounding box upper left is at (0,0). */
if (rects[i].x2 > bounding_box.x2)
bounding_box.x2 = rects[i].x2;
if (rects[i].y2 > bounding_box.y2)
bounding_box.y2 = rects[i].y2;
total_pixels += (u64) drm_rect_width(&rects[i]) *
(u64) drm_rect_height(&rects[i]);
}
/* Virtual svga device primary limits are always in 32-bpp. */
pixel_mem = total_pixels * 4;
/*
* For HV10 and below, prim_bb_mem is the VRAM size. When
* SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size is
* the limit on the primary bounding box.
*/
if (pixel_mem > dev_priv->prim_bb_mem) {
DRM_ERROR("Combined output size too large.\n");
return -EINVAL;
}
/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
if (dev_priv->active_display_unit != vmw_du_screen_target ||
!(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
if (bb_mem > dev_priv->prim_bb_mem) {
DRM_ERROR("Topology is beyond supported limits.\n");
return -EINVAL;
}
}
return 0;
}
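As an illustration of the two limits enforced above, consider a topology of three 1920x1080 screens: two side by side and one below the left-hand screen. The numbers below are a worked example, not taken from the patch.

/*
 * rects: (0,0)-(1920,1080), (1920,0)-(3840,1080), (0,1080)-(1920,2160)
 *
 *   total_pixels = 3 * 1920 * 1080      = 6220800
 *   pixel_mem    = total_pixels * 4     = 24883200 bytes (~23.7 MiB)
 *   bounding box = 3840 x 2160
 *   bb_mem       = 3840 * 2160 * 4      = 33177600 bytes (~31.6 MiB)
 *
 * bb_mem exceeds pixel_mem because of the unused quadrant inside the
 * bounding box; that extra bounding-box cost is exactly what
 * SVGA_CAP_NO_BB_RESTRICTION lets screen targets skip.
 */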
/**
* vmw_kms_check_topology - Validates topology in drm_atomic_state
* @dev: DRM device
* @state: the driver state object
*
* Returns:
* 0 on success, otherwise a negative error code
*/
static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_rect *rects;
struct drm_crtc *crtc;
uint32_t i;
int ret = 0;
rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
GFP_KERNEL);
if (!rects)
return -ENOMEM;
mutex_lock(&dev_priv->requested_layout_mutex);
drm_for_each_crtc(crtc, dev) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
i = drm_crtc_index(crtc);
if (crtc_state && crtc_state->enable) {
rects[i].x1 = du->gui_x;
rects[i].y1 = du->gui_y;
rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
}
}
/* Determine change to topology due to new atomic state */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
if (!new_crtc_state->enable && old_crtc_state->enable) {
rects[i].x1 = 0;
rects[i].y1 = 0;
rects[i].x2 = 0;
rects[i].y2 = 0;
continue;
}
if (!du->pref_active) {
ret = -EINVAL;
goto clean;
}
/*
* For vmwgfx each crtc has only one connector attached, and it never
* changes, so there is no need to check crtc->connector_mask and
* iterate over it.
*/
connector = &du->connector;
conn_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(conn_state)) {
ret = PTR_ERR(conn_state);
goto clean;
}
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
vmw_conn_state->gui_x = du->gui_x;
vmw_conn_state->gui_y = du->gui_y;
rects[i].x1 = du->gui_x;
rects[i].y1 = du->gui_y;
rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
}
ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
rects);
clean:
mutex_unlock(&dev_priv->requested_layout_mutex);
kfree(rects);
return ret;
}
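To make the bookkeeping above concrete, here is what the intermediate rects[] might hold for a hypothetical two-unit setup in which the new atomic state disables CRTC 1. This is a kernel-context sketch, not part of the patch.

#include <drm/drm_rect.h>

/* rects[] as it would be handed to vmw_kms_check_display_memory() here. */
static const struct drm_rect example_rects[2] = {
	/* CRTC 0: still enabled, rect taken from the current state. */
	{ .x1 = 0, .y1 = 0, .x2 = 1280, .y2 = 800 },
	/* CRTC 1: zeroed because it goes from enabled to disabled. */
	{ .x1 = 0, .y1 = 0, .x2 = 0, .y2 = 0 },
};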
/** /**
* vmw_kms_atomic_check_modeset - validate state object for modeset changes * vmw_kms_atomic_check_modeset - validate state object for modeset changes
...@@ -1520,40 +1667,39 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1520,40 +1667,39 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* us to assign a value to mode->crtc_clock so that * us to assign a value to mode->crtc_clock so that
* drm_calc_timestamping_constants() won't throw an error message * drm_calc_timestamping_constants() won't throw an error message
* *
* RETURNS * Returns:
* Zero for success or -errno * Zero for success or -errno
*/ */
static int static int
vmw_kms_atomic_check_modeset(struct drm_device *dev, vmw_kms_atomic_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state) struct drm_atomic_state *state)
{ {
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct vmw_private *dev_priv = vmw_priv(dev); struct drm_crtc_state *crtc_state;
int i; bool need_modeset = false;
int i, ret;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
unsigned long requested_bb_mem = 0;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
struct drm_plane *plane = crtc->primary;
struct drm_plane_state *plane_state;
plane_state = drm_atomic_get_new_plane_state(state, plane);
if (plane_state && plane_state->fb) { ret = drm_atomic_helper_check(dev, state);
int cpp = plane_state->fb->format->cpp[0]; if (ret)
return ret;
requested_bb_mem += crtc->mode.hdisplay * cpp * if (!state->allow_modeset)
crtc->mode.vdisplay; return ret;
}
if (requested_bb_mem > dev_priv->prim_bb_mem) /*
return -EINVAL; * Legacy paths such as @drm_atomic_helper_update_plane do not set
} * allow_modeset properly, which would result in unnecessary calls to
* vmw_kms_check_topology. Hence the extra check below.
*/
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
need_modeset = true;
} }
return drm_atomic_helper_check(dev, state); if (need_modeset)
return vmw_kms_check_topology(dev, state);
return ret;
} }
static const struct drm_mode_config_funcs vmw_kms_funcs = { static const struct drm_mode_config_funcs vmw_kms_funcs = {
...@@ -1845,40 +1991,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe) ...@@ -1845,40 +1991,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{ {
} }
/**
/* * vmw_du_update_layout - Update the display unit with topology from resolution
* Small shared kms functions. * plugin and generate DRM uevent
* @dev_priv: device private
* @num_rects: number of drm_rect in rects
* @rects: topology to update
*/ */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, unsigned int num_rects, struct drm_rect *rects)
struct drm_vmw_rect *rects)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct vmw_display_unit *du; struct vmw_display_unit *du;
struct drm_connector *con; struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
mutex_lock(&dev->mode_config.mutex); /*
* Currently only gui_x/y are protected by requested_layout_mutex.
#if 0 */
{ mutex_lock(&dev_priv->requested_layout_mutex);
unsigned int i; drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(con, &conn_iter) {
DRM_INFO("%s: new layout ", __func__); du = vmw_connector_to_du(con);
for (i = 0; i < num; i++) if (num_rects > du->unit) {
DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, du->pref_width = drm_rect_width(&rects[du->unit]);
rects[i].w, rects[i].h); du->pref_height = drm_rect_height(&rects[du->unit]);
DRM_INFO("\n"); du->pref_active = true;
du->gui_x = rects[du->unit].x1;
du->gui_y = rects[du->unit].y1;
} else {
du->pref_width = 800;
du->pref_height = 600;
du->pref_active = false;
du->gui_x = 0;
du->gui_y = 0;
}
} }
#endif drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev_priv->requested_layout_mutex);
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) { list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con); du = vmw_connector_to_du(con);
if (num > du->unit) { if (num_rects > du->unit) {
du->pref_width = rects[du->unit].w;
du->pref_height = rects[du->unit].h;
du->pref_active = true;
du->gui_x = rects[du->unit].x;
du->gui_y = rects[du->unit].y;
drm_object_property_set_value drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property, (&con->base, dev->mode_config.suggested_x_property,
du->gui_x); du->gui_x);
...@@ -1886,9 +2041,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, ...@@ -1886,9 +2041,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
(&con->base, dev->mode_config.suggested_y_property, (&con->base, dev->mode_config.suggested_y_property,
du->gui_y); du->gui_y);
} else { } else {
du->pref_width = 800;
du->pref_height = 600;
du->pref_active = false;
drm_object_property_set_value drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property, (&con->base, dev->mode_config.suggested_x_property,
0); 0);
...@@ -1898,8 +2050,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, ...@@ -1898,8 +2050,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
} }
con->status = vmw_du_connector_detect(con, true); con->status = vmw_du_connector_detect(con, true);
} }
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_hotplug_event(dev); drm_sysfs_hotplug_event(dev);
return 0; return 0;
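As a concrete illustration of the mapping performed above (hypothetical numbers): with num_rects = 2, each display unit whose index has a matching rect picks up that rect as its preferred size and GUI position, and any remaining unit falls back to an inactive 800x600 preference.

/*
 * rects[0] = (0,0)-(1920,1080), rects[1] = (1920,0)-(3200,720)
 *
 *   unit 0: pref 1920x1080, gui (0, 0),    pref_active = true
 *   unit 1: pref 1280x720,  gui (1920, 0), pref_active = true
 *   unit 2: pref 800x600,   gui (0, 0),    pref_active = false  (no rect)
 */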
...@@ -2199,7 +2351,25 @@ vmw_du_connector_atomic_get_property(struct drm_connector *connector, ...@@ -2199,7 +2351,25 @@ vmw_du_connector_atomic_get_property(struct drm_connector *connector,
return 0; return 0;
} }
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Update the preferred topology of the display units as per the ioctl request.
* The topology is expressed as an array of drm_vmw_rect.
* e.g.
* [0 0 640 480] [640 0 800 600] [0 480 640 480]
*
* NOTE:
* The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
* Besides the device limits on the topology, x + w and y + h (lower right)
* cannot be greater than INT_MAX, so a topology beyond these limits is
* rejected with an error.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
...@@ -2208,15 +2378,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ...@@ -2208,15 +2378,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_update_layout_arg *)data; (struct drm_vmw_update_layout_arg *)data;
void __user *user_rects; void __user *user_rects;
struct drm_vmw_rect *rects; struct drm_vmw_rect *rects;
struct drm_rect *drm_rects;
unsigned rects_size; unsigned rects_size;
int ret; int ret, i;
int i;
u64 total_pixels = 0;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_rect bounding_box = {0};
if (!arg->num_outputs) { if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600}; struct drm_rect def_rect = {0, 0, 800, 600};
vmw_du_update_layout(dev_priv, 1, &def_rect); vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0; return 0;
} }
...@@ -2235,52 +2402,29 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ...@@ -2235,52 +2402,29 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
goto out_free; goto out_free;
} }
for (i = 0; i < arg->num_outputs; ++i) { drm_rects = (struct drm_rect *)rects;
if (rects[i].x < 0 ||
rects[i].y < 0 ||
rects[i].x + rects[i].w > mode_config->max_width ||
rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
}
/*
* bounding_box.w and bunding_box.h are used as
* lower-right coordinates
*/
if (rects[i].x + rects[i].w > bounding_box.w)
bounding_box.w = rects[i].x + rects[i].w;
if (rects[i].y + rects[i].h > bounding_box.h)
bounding_box.h = rects[i].y + rects[i].h;
total_pixels += (u64) rects[i].w * (u64) rects[i].h; for (i = 0; i < arg->num_outputs; i++) {
} struct drm_vmw_rect curr_rect;
if (dev_priv->active_display_unit == vmw_du_screen_target) { /* Verify the user-space rect for overflow, as the kernel uses drm_rect */
/* if ((rects[i].x + rects[i].w > INT_MAX) ||
* For Screen Targets, the limits for a toplogy are: (rects[i].y + rects[i].h > INT_MAX)) {
* 1. Bounding box (assuming 32bpp) must be < prim_bb_mem ret = -ERANGE;
* 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
*/
u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
u64 pixel_mem = total_pixels * 4;
if (bb_mem > dev_priv->prim_bb_mem) {
DRM_ERROR("Topology is beyond supported limits.\n");
ret = -EINVAL;
goto out_free; goto out_free;
} }
if (pixel_mem > dev_priv->prim_bb_mem) { curr_rect = rects[i];
DRM_ERROR("Combined output size too large\n"); drm_rects[i].x1 = curr_rect.x;
ret = -EINVAL; drm_rects[i].y1 = curr_rect.y;
goto out_free; drm_rects[i].x2 = curr_rect.x + curr_rect.w;
} drm_rects[i].y2 = curr_rect.y + curr_rect.h;
} }
vmw_du_update_layout(dev_priv, arg->num_outputs, rects); ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
if (ret == 0)
vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
out_free: out_free:
kfree(rects); kfree(rects);
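For context, a minimal user-space sketch of driving this ioctl through libdrm's drmCommandWrite(). It assumes the uapi definitions of struct drm_vmw_update_layout_arg, struct drm_vmw_rect and DRM_VMW_UPDATE_LAYOUT from vmwgfx_drm.h, that fd is an open vmwgfx DRM node, and simplifies header locations and error handling.

#include <stdint.h>
#include <xf86drm.h>
#include <vmwgfx_drm.h>	/* installed uapi header; exact path may differ */

/* Request a two-output topology: 1920x1080 at (0,0) and 1280x720 at (1920,0). */
static int set_two_output_layout(int fd)
{
	struct drm_vmw_rect rects[2] = {
		{ .x = 0,    .y = 0, .w = 1920, .h = 1080 },
		{ .x = 1920, .y = 0, .w = 1280, .h = 720  },
	};
	struct drm_vmw_update_layout_arg arg = {
		.num_outputs = 2,
		.rects = (uintptr_t)rects,	/* user pointer passed as u64 */
	};

	/* DRM_VMW_UPDATE_LAYOUT is the driver-private command index. */
	return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
}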
...@@ -2427,7 +2571,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, ...@@ -2427,7 +2571,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
* interrupted by a signal. * interrupted by a signal.
*/ */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *buf,
bool interruptible, bool interruptible,
bool validate_as_mob, bool validate_as_mob,
bool for_cpu_blit) bool for_cpu_blit)
...@@ -2459,7 +2603,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, ...@@ -2459,7 +2603,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of * Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_buffer_prepare. * vmw_kms_helper_buffer_prepare.
*/ */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf) void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
{ {
if (buf) if (buf)
ttm_bo_unreserve(&buf->base); ttm_bo_unreserve(&buf->base);
...@@ -2482,7 +2626,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf) ...@@ -2482,7 +2626,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
*/ */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv, struct drm_file *file_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user * struct drm_vmw_fence_rep __user *
user_fence_rep) user_fence_rep)
...@@ -2494,7 +2638,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, ...@@ -2494,7 +2638,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
file_priv ? &handle : NULL); file_priv ? &handle : NULL);
if (buf) if (buf)
vmw_fence_single_bo(&buf->base, fence); vmw_bo_fence_single(&buf->base, fence);
if (file_priv) if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence, ret, user_fence_rep, fence,
...@@ -2522,7 +2666,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) ...@@ -2522,7 +2666,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
struct vmw_resource *res = ctx->res; struct vmw_resource *res = ctx->res;
vmw_kms_helper_buffer_revert(ctx->buf); vmw_kms_helper_buffer_revert(ctx->buf);
vmw_dmabuf_unreference(&ctx->buf); vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0); vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex); mutex_unlock(&res->dev_priv->cmdbuf_mutex);
} }
...@@ -2567,7 +2711,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, ...@@ -2567,7 +2711,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
if (ret) if (ret)
goto out_unreserve; goto out_unreserve;
ctx->buf = vmw_dmabuf_reference(res->backup); ctx->buf = vmw_bo_reference(res->backup);
} }
ret = vmw_resource_validate(res); ret = vmw_resource_validate(res);
if (ret) if (ret)
...@@ -2600,7 +2744,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, ...@@ -2600,7 +2744,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL); out_fence, NULL);
vmw_dmabuf_unreference(&ctx->buf); vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0); vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex); mutex_unlock(&res->dev_priv->cmdbuf_mutex);
} }
......
...@@ -90,7 +90,7 @@ struct vmw_kms_dirty { ...@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
#define vmw_framebuffer_to_vfbs(x) \ #define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base) container_of(x, struct vmw_framebuffer_surface, base.base)
#define vmw_framebuffer_to_vfbd(x) \ #define vmw_framebuffer_to_vfbd(x) \
container_of(x, struct vmw_framebuffer_dmabuf, base.base) container_of(x, struct vmw_framebuffer_bo, base.base)
/** /**
* Base class for framebuffers * Base class for framebuffers
...@@ -102,7 +102,7 @@ struct vmw_framebuffer { ...@@ -102,7 +102,7 @@ struct vmw_framebuffer {
struct drm_framebuffer base; struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb); int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb); int (*unpin)(struct vmw_framebuffer *fb);
bool dmabuf; bool bo;
struct ttm_base_object *user_obj; struct ttm_base_object *user_obj;
uint32_t user_handle; uint32_t user_handle;
}; };
...@@ -117,15 +117,15 @@ struct vmw_clip_rect { ...@@ -117,15 +117,15 @@ struct vmw_clip_rect {
struct vmw_framebuffer_surface { struct vmw_framebuffer_surface {
struct vmw_framebuffer base; struct vmw_framebuffer base;
struct vmw_surface *surface; struct vmw_surface *surface;
struct vmw_dma_buffer *buffer; struct vmw_buffer_object *buffer;
struct list_head head; struct list_head head;
bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */ bool is_bo_proxy; /* true if this is a proxy surface for a buffer object */
}; };
struct vmw_framebuffer_dmabuf { struct vmw_framebuffer_bo {
struct vmw_framebuffer base; struct vmw_framebuffer base;
struct vmw_dma_buffer *buffer; struct vmw_buffer_object *buffer;
}; };
...@@ -161,18 +161,18 @@ struct vmw_crtc_state { ...@@ -161,18 +161,18 @@ struct vmw_crtc_state {
* *
* @base DRM plane object * @base DRM plane object
* @surf Display surface for STDU * @surf Display surface for STDU
* @dmabuf display dmabuf for SOU * @bo display bo for SOU
* @content_fb_type Used by STDU. * @content_fb_type Used by STDU.
* @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit * @bo_size Size of the bo, used by Screen Object Display Unit
* @pinned pin count for STDU display surface * @pinned pin count for STDU display surface
*/ */
struct vmw_plane_state { struct vmw_plane_state {
struct drm_plane_state base; struct drm_plane_state base;
struct vmw_surface *surf; struct vmw_surface *surf;
struct vmw_dma_buffer *dmabuf; struct vmw_buffer_object *bo;
int content_fb_type; int content_fb_type;
unsigned long dmabuf_size; unsigned long bo_size;
int pinned; int pinned;
...@@ -192,6 +192,24 @@ struct vmw_connector_state { ...@@ -192,6 +192,24 @@ struct vmw_connector_state {
struct drm_connector_state base; struct drm_connector_state base;
bool is_implicit; bool is_implicit;
/**
* @gui_x:
*
* vmwgfx connector property representing the x position of this display
* unit (connector is synonymous with display unit) in the overall topology.
* This is what the device expects as xRoot when creating the screen.
*/
int gui_x;
/**
* @gui_y:
*
* vmwgfx connector property representing the y position of this display
* unit (connector is synonymous with display unit) in the overall topology.
* This is what the device expects as yRoot when creating the screen.
*/
int gui_y;
}; };
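A small kernel-context sketch of how the gui_x/gui_y fields documented above would be read back out of an atomic state; it assumes vmw_connector_state_to_vcs() is the container_of()-style downcast used in vmw_kms_check_topology(), and the helper name is illustrative only.

/* Hypothetical helper: fetch the per-connector GUI origin from atomic state. */
static void example_read_gui_origin(struct drm_connector_state *conn_state,
				    int *x, int *y)
{
	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(conn_state);

	*x = vcs->gui_x;
	*y = vcs->gui_y;
}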
/** /**
...@@ -209,7 +227,7 @@ struct vmw_display_unit { ...@@ -209,7 +227,7 @@ struct vmw_display_unit {
struct drm_plane cursor; struct drm_plane cursor;
struct vmw_surface *cursor_surface; struct vmw_surface *cursor_surface;
struct vmw_dma_buffer *cursor_dmabuf; struct vmw_buffer_object *cursor_bo;
size_t cursor_age; size_t cursor_age;
int cursor_x; int cursor_x;
...@@ -243,7 +261,7 @@ struct vmw_display_unit { ...@@ -243,7 +261,7 @@ struct vmw_display_unit {
struct vmw_validation_ctx { struct vmw_validation_ctx {
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_dma_buffer *buf; struct vmw_buffer_object *buf;
}; };
#define vmw_crtc_to_du(x) \ #define vmw_crtc_to_du(x) \
...@@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, ...@@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_kms_dirty *dirty); struct vmw_kms_dirty *dirty);
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *buf,
bool interruptible, bool interruptible,
bool validate_as_mob, bool validate_as_mob,
bool for_cpu_blit); bool for_cpu_blit);
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf); void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv, struct drm_file *file_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user * struct drm_vmw_fence_rep __user *
user_fence_rep); user_fence_rep);
...@@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv, ...@@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips); uint32_t num_clips);
struct vmw_framebuffer * struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv, vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf, struct vmw_buffer_object *bo,
struct vmw_surface *surface, struct vmw_surface *surface,
bool only_2d, bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd); const struct drm_mode_fb_cmd2 *mode_cmd);
...@@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector, ...@@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
*/ */
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv); int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv); int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer, struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color, unsigned int flags, unsigned int color,
struct drm_clip_rect *clips, struct drm_clip_rect *clips,
unsigned num_clips, int increment); unsigned int num_clips, int increment);
int vmw_kms_update_proxy(struct vmw_resource *res, int vmw_kms_update_proxy(struct vmw_resource *res,
const struct drm_clip_rect *clips, const struct drm_clip_rect *clips,
unsigned num_clips, unsigned num_clips,
...@@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ...@@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
unsigned num_clips, int inc, unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc); struct drm_crtc *crtc);
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer, struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips, struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips, struct drm_vmw_rect *vclips,
unsigned num_clips, int increment, unsigned int num_clips, int increment,
bool interruptible, bool interruptible,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc); struct drm_crtc *crtc);
int vmw_kms_sou_readback(struct vmw_private *dev_priv, int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv, struct drm_file *file_priv,
struct vmw_framebuffer *vfb, struct vmw_framebuffer *vfb,
......
...@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv) ...@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
} }
int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer, struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color, unsigned int flags, unsigned int color,
struct drm_clip_rect *clips, struct drm_clip_rect *clips,
unsigned num_clips, int increment) unsigned int num_clips, int increment)
{ {
size_t fifo_size; size_t fifo_size;
int i; int i;
......
...@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, ...@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL); ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
vmw_fence_single_bo(bo, NULL); vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
} }
...@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ...@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL); ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
vmw_fence_single_bo(bo, NULL); vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
ttm_bo_unref(&batch->otable_bo); ttm_bo_unref(&batch->otable_bo);
...@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, ...@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd)); vmw_fifo_commit(dev_priv, sizeof(*cmd));
} }
if (bo) { if (bo) {
vmw_fence_single_bo(bo, NULL); vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
} }
vmw_fifo_resource_dec(dev_priv); vmw_fifo_resource_dec(dev_priv);
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/frame.h> #include <linux/frame.h>
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_msg.h" #include "vmwgfx_msg.h"
...@@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, ...@@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
(HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
DRM_ERROR("Failed to get reply size\n"); DRM_ERROR("Failed to get reply size for host message.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, ...@@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx; reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL); reply = kzalloc(reply_len + 1, GFP_KERNEL);
if (!reply) { if (!reply) {
DRM_ERROR("Cannot allocate memory for reply\n"); DRM_ERROR("Cannot allocate memory for host message reply.\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param, ...@@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param); msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
if (!msg) { if (!msg) {
DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
guest_info_param);
return -ENOMEM; return -ENOMEM;
} }
...@@ -374,7 +376,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param, ...@@ -374,7 +376,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
out_open: out_open:
*length = 0; *length = 0;
kfree(msg); kfree(msg);
DRM_ERROR("Failed to get %s", guest_info_param); DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
return -EINVAL; return -EINVAL;
} }
...@@ -403,7 +405,7 @@ int vmw_host_log(const char *log) ...@@ -403,7 +405,7 @@ int vmw_host_log(const char *log)
msg = kasprintf(GFP_KERNEL, "log %s", log); msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) { if (!msg) {
DRM_ERROR("Cannot allocate memory for log message\n"); DRM_ERROR("Cannot allocate memory for host log message.\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -422,7 +424,7 @@ int vmw_host_log(const char *log) ...@@ -422,7 +424,7 @@ int vmw_host_log(const char *log)
vmw_close_channel(&channel); vmw_close_channel(&channel);
out_open: out_open:
kfree(msg); kfree(msg);
DRM_ERROR("Failed to send log\n"); DRM_ERROR("Failed to send host log message.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -38,7 +38,7 @@ ...@@ -38,7 +38,7 @@
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream { struct vmw_stream {
struct vmw_dma_buffer *buf; struct vmw_buffer_object *buf;
bool claimed; bool claimed;
bool paused; bool paused;
struct drm_vmw_control_stream_arg saved; struct drm_vmw_control_stream_arg saved;
...@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd, ...@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
* -ERESTARTSYS if interrupted by a signal. * -ERESTARTSYS if interrupted by a signal.
*/ */
static int vmw_overlay_send_put(struct vmw_private *dev_priv, static int vmw_overlay_send_put(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg, struct drm_vmw_control_stream_arg *arg,
bool interruptible) bool interruptible)
{ {
...@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, ...@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
* used with GMRs instead of being locked to vram. * used with GMRs instead of being locked to vram.
*/ */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *buf,
bool pin, bool inter) bool pin, bool inter)
{ {
if (!pin) if (!pin)
return vmw_dmabuf_unpin(dev_priv, buf, inter); return vmw_bo_unpin(dev_priv, buf, inter);
if (dev_priv->active_display_unit == vmw_du_legacy) if (dev_priv->active_display_unit == vmw_du_legacy)
return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter); return vmw_bo_pin_in_vram(dev_priv, buf, inter);
return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter); return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
} }
/** /**
...@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, ...@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
} }
if (!pause) { if (!pause) {
vmw_dmabuf_unreference(&stream->buf); vmw_bo_unreference(&stream->buf);
stream->paused = false; stream->paused = false;
} else { } else {
stream->paused = true; stream->paused = true;
...@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, ...@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted. * -ERESTARTSYS if interrupted.
*/ */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv, static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf, struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg, struct drm_vmw_control_stream_arg *arg,
bool interruptible) bool interruptible)
{ {
...@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, ...@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
} }
if (stream->buf != buf) if (stream->buf != buf)
stream->buf = vmw_dmabuf_reference(buf); stream->buf = vmw_bo_reference(buf);
stream->saved = *arg; stream->saved = *arg;
/* stream is no longer stopped/paused */ /* stream is no longer stopped/paused */
stream->paused = false; stream->paused = false;
...@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ...@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_overlay *overlay = dev_priv->overlay_priv; struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg = struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data; (struct drm_vmw_control_stream_arg *)data;
struct vmw_dma_buffer *buf; struct vmw_buffer_object *buf;
struct vmw_resource *res; struct vmw_resource *res;
int ret; int ret;
...@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ...@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL); ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_dmabuf_unreference(&buf); vmw_bo_unreference(&buf);
out_unlock: out_unlock:
mutex_unlock(&overlay->mutex); mutex_unlock(&overlay->mutex);
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h> #include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include "vmwgfx_resource_priv.h" #include "vmwgfx_resource_priv.h"
...@@ -35,29 +34,6 @@ ...@@ -35,29 +34,6 @@
#define VMW_RES_EVICT_ERR_COUNT 10 #define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
struct ttm_prime_object prime;
struct vmw_dma_buffer dma;
};
struct vmw_bo_user_rep {
uint32_t handle;
uint64_t map_handle;
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
return container_of(bo, struct vmw_dma_buffer, base);
}
static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{ {
kref_get(&res->kref); kref_get(&res->kref);
...@@ -116,7 +92,7 @@ static void vmw_resource_release(struct kref *kref) ...@@ -116,7 +92,7 @@ static void vmw_resource_release(struct kref *kref)
res->backup_dirty = false; res->backup_dirty = false;
list_del_init(&res->mob_head); list_del_init(&res->mob_head);
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
vmw_dmabuf_unreference(&res->backup); vmw_bo_unreference(&res->backup);
} }
if (likely(res->hw_destroy != NULL)) { if (likely(res->hw_destroy != NULL)) {
...@@ -287,7 +263,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, ...@@ -287,7 +263,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
} }
/** /**
* Helper function that looks either a surface or dmabuf. * Helper function that looks up either a surface or a bo.
* *
* The pointers pointed at by out_surf and out_buf need to be NULL. * The pointers pointed at by out_surf and out_buf need to be NULL.
*/ */
...@@ -295,7 +271,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, ...@@ -295,7 +271,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile, struct ttm_object_file *tfile,
uint32_t handle, uint32_t handle,
struct vmw_surface **out_surf, struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf) struct vmw_buffer_object **out_buf)
{ {
struct vmw_resource *res; struct vmw_resource *res;
int ret; int ret;
...@@ -311,512 +287,10 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, ...@@ -311,512 +287,10 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
} }
*out_surf = NULL; *out_surf = NULL;
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL); ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
return ret; return ret;
} }
/**
* Buffer management.
*/
/**
* vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @size: The requested buffer size.
* @user: Whether this is an ordinary dma buffer or a user dma buffer.
*/
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
bool user)
{
static size_t struct_size, user_struct_size;
size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
if (unlikely(struct_size == 0)) {
size_t backend_size = ttm_round_pot(vmw_tt_size);
struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_dma_buffer));
user_struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
}
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
page_array_size +=
ttm_round_pot(num_pages * sizeof(dma_addr_t));
return ((user) ? user_struct_size : struct_size) +
page_array_size;
}
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
vmw_dma_buffer_unmap(vmw_bo);
kfree(vmw_bo);
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
vmw_dma_buffer_unmap(&vmw_user_bo->dma);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible,
void (*bo_free) (struct ttm_buffer_object *bo))
{
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
bool user = (bo_free == &vmw_user_dmabuf_destroy);
BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, interruptible, acc_size,
NULL, NULL, bo_free);
return ret;
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base = *p_base;
struct ttm_buffer_object *bo;
*p_base = NULL;
if (unlikely(base == NULL))
return;
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
bo = &vmw_user_bo->dma.base;
ttm_bo_unref(&bo);
}
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
enum ttm_ref_type ref_type)
{
struct vmw_user_dma_buffer *user_bo;
user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
ttm_bo_synccpu_write_release(&user_bo->dma.base);
break;
default:
BUG();
}
}
/**
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
*
* @dev_priv: Pointer to a struct device private.
* @tfile: Pointer to a struct ttm_object_file on which to register the user
* object.
* @size: Size of the dma buffer.
* @shareable: Boolean whether the buffer is shareable with other open files.
* @handle: Pointer to where the handle value should be assigned.
* @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
* should be assigned.
*/
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf,
struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp;
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
if (unlikely(!user_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
(dev_priv->has_mob) ?
&vmw_sys_placement :
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
return ret;
tmp = ttm_bo_reference(&user_bo->dma.base);
ret = ttm_prime_object_init(tfile,
size,
&user_bo->prime,
shareable,
ttm_buffer_type,
&vmw_user_dmabuf_release,
&vmw_user_dmabuf_ref_obj_release);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*p_dma_buf = &user_bo->dma;
if (p_base) {
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
*handle = user_bo->prime.base.hash.key;
out_no_base_object:
return ret;
}
/**
* vmw_user_dmabuf_verify_access - verify access permissions on this
* buffer object.
*
* @bo: Pointer to the buffer object being accessed
* @tfile: Identifying the caller.
*/
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile)
{
struct vmw_user_dma_buffer *vmw_user_bo;
if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo);
/* Check that the caller has opened the object. */
if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
return 0;
DRM_ERROR("Could not grant buffer access.\n");
return -EPERM;
}
/**
* vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
* @user_bo: Pointer to the buffer object being grabbed for CPU access
* @tfile: Identifying the caller.
* @flags: Flags indicating how the grab should be performed.
*
* A blocking grab will be automatically released when @tfile is closed.
*/
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
struct ttm_buffer_object *bo = &user_bo->dma.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
return lret;
return 0;
}
ret = ttm_bo_synccpu_write_grab
(bo, !!(flags & drm_vmw_synccpu_dontblock));
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
return ret;
}
/**
* vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
* and unblock command submission on the buffer if blocked.
*
* @handle: Handle identifying the buffer object.
* @tfile: Identifying the caller.
* @flags: Flags indicating the type of release.
*/
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
struct ttm_object_file *tfile,
uint32_t flags)
{
if (!(flags & drm_vmw_synccpu_allow_cs))
return ttm_ref_object_base_unref(tfile, handle,
TTM_REF_SYNCCPU_WRITE);
return 0;
}
/**
* vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
* functionality.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller.
*
* This function checks the ioctl arguments for validity and calls the
* relevant synccpu functions.
*/
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
struct vmw_dma_buffer *dma_buf;
struct vmw_user_dma_buffer *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_base_object *buffer_base;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
|| (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
drm_vmw_synccpu_dontblock |
drm_vmw_synccpu_allow_cs)) != 0) {
DRM_ERROR("Illegal synccpu flags.\n");
return -EINVAL;
}
switch (arg->op) {
case drm_vmw_synccpu_grab:
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
&buffer_base);
if (unlikely(ret != 0))
return ret;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
dma);
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
vmw_dmabuf_unreference(&dma_buf);
ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
}
break;
case drm_vmw_synccpu_release:
ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
arg->flags);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
}
break;
default:
DRM_ERROR("Invalid synccpu operation.\n");
return -EINVAL;
}
return 0;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_alloc_dmabuf_arg *arg =
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_dma_buffer *dma_buf;
uint32_t handle;
int ret;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &dma_buf,
NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;
rep->handle = handle;
rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
arg->handle,
TTM_REF_USAGE);
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out,
struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL)) {
pr_err("Invalid buffer object handle 0x%08lx\n",
(unsigned long)handle);
return -ESRCH;
}
if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
pr_err("Invalid buffer object handle 0x%08lx\n",
(unsigned long)handle);
return -EINVAL;
}
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
if (p_base)
*p_base = base;
else
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
return 0;
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf,
uint32_t *handle)
{
struct vmw_user_dma_buffer *user_bo;
if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL, false);
}
/**
* vmw_dumb_create - Create a dumb kms buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @args: Pointer to a struct drm_mode_create_dumb structure
*
* This is a driver callback for the core drm create_dumb functionality.
* Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
* that the arguments have a different format.
*/
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_dma_buffer *dma_buf;
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
&dma_buf, NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;
vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
/**
* vmw_dumb_map_offset - Return the address space offset of a dumb buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @handle: Handle identifying the dumb buffer.
* @offset: The address space offset returned.
*
* This is a driver callback for the core drm dumb_map_offset functionality.
*/
int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_dma_buffer *out_buf;
int ret;
ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
if (ret != 0)
return -EINVAL;
*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
vmw_dmabuf_unreference(&out_buf);
return 0;
}
/**
* vmw_dumb_destroy - Destroy a dumb boffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @handle: Handle identifying the dumb buffer.
*
* This is a driver callback for the core drm dumb_destroy functionality.
*/
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle)
{
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
}
/** /**
* vmw_resource_buf_alloc - Allocate a backup buffer for a resource. * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
* *
...@@ -829,7 +303,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, ...@@ -829,7 +303,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
{ {
unsigned long size = unsigned long size =
(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
struct vmw_dma_buffer *backup; struct vmw_buffer_object *backup;
int ret; int ret;
if (likely(res->backup)) { if (likely(res->backup)) {
...@@ -841,16 +315,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, ...@@ -841,16 +315,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
if (unlikely(!backup)) if (unlikely(!backup))
return -ENOMEM; return -ENOMEM;
ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement, res->func->backup_placement,
interruptible, interruptible,
&vmw_dmabuf_bo_free); &vmw_bo_bo_free);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_dmabuf; goto out_no_bo;
res->backup = backup; res->backup = backup;
out_no_dmabuf: out_no_bo:
return ret; return ret;
} }
...@@ -919,7 +393,7 @@ static int vmw_resource_do_validate(struct vmw_resource *res, ...@@ -919,7 +393,7 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
*/ */
void vmw_resource_unreserve(struct vmw_resource *res, void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup, bool switch_backup,
struct vmw_dma_buffer *new_backup, struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset) unsigned long new_backup_offset)
{ {
struct vmw_private *dev_priv = res->dev_priv; struct vmw_private *dev_priv = res->dev_priv;
...@@ -931,11 +405,11 @@ void vmw_resource_unreserve(struct vmw_resource *res, ...@@ -931,11 +405,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (res->backup) { if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base); lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head); list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup); vmw_bo_unreference(&res->backup);
} }
if (new_backup) { if (new_backup) {
res->backup = vmw_dmabuf_reference(new_backup); res->backup = vmw_bo_reference(new_backup);
lockdep_assert_held(&new_backup->base.resv->lock.base); lockdep_assert_held(&new_backup->base.resv->lock.base);
list_add_tail(&res->mob_head, &new_backup->res_list); list_add_tail(&res->mob_head, &new_backup->res_list);
} else { } else {
...@@ -959,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_resource *res, ...@@ -959,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* for a resource and in that case, allocate * for a resource and in that case, allocate
* one, reserve and validate it. * one, reserve and validate it.
* *
* @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a backup buffer. * @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be * @interruptible: Whether any sleeps during allocation should be
* performed while interruptible. * performed while interruptible.
...@@ -966,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_resource *res, ...@@ -966,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* reserved and validated backup buffer. * reserved and validated backup buffer.
*/ */
static int static int
vmw_resource_check_buffer(struct vmw_resource *res, vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
struct vmw_resource *res,
bool interruptible, bool interruptible,
struct ttm_validate_buffer *val_buf) struct ttm_validate_buffer *val_buf)
{ {
...@@ -985,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_resource *res, ...@@ -985,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
val_buf->bo = ttm_bo_reference(&res->backup->base); val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false; val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list); list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL); ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_reserve; goto out_no_reserve;
...@@ -1003,11 +479,11 @@ vmw_resource_check_buffer(struct vmw_resource *res, ...@@ -1003,11 +479,11 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0; return 0;
out_no_validate: out_no_validate:
ttm_eu_backoff_reservation(NULL, &val_list); ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve: out_no_reserve:
ttm_bo_unref(&val_buf->bo); ttm_bo_unref(&val_buf->bo);
if (backup_dirty) if (backup_dirty)
vmw_dmabuf_unreference(&res->backup); vmw_bo_unreference(&res->backup);
return ret; return ret;
} }
...@@ -1050,10 +526,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, ...@@ -1050,10 +526,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
* vmw_resource_backoff_reservation - Unreserve and unreference a * vmw_resource_backoff_reservation - Unreserve and unreference a
* backup buffer * backup buffer
* *
* @ticket: The ww acquire ctx used for reservation.
* @val_buf: Backup buffer information. * @val_buf: Backup buffer information.
*/ */
static void static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
struct ttm_validate_buffer *val_buf)
{ {
struct list_head val_list; struct list_head val_list;
...@@ -1062,7 +540,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) ...@@ -1062,7 +540,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
INIT_LIST_HEAD(&val_list); INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list); list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(NULL, &val_list); ttm_eu_backoff_reservation(ticket, &val_list);
ttm_bo_unref(&val_buf->bo); ttm_bo_unref(&val_buf->bo);
} }
...@@ -1070,10 +548,12 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) ...@@ -1070,10 +548,12 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
* vmw_resource_do_evict - Evict a resource, and transfer its data * vmw_resource_do_evict - Evict a resource, and transfer its data
* to a backup buffer. * to a backup buffer.
* *
* @ticket: The ww acquire ticket to use, or NULL if trylocking.
* @res: The resource to evict. * @res: The resource to evict.
* @interruptible: Whether to wait interruptibly. * @interruptible: Whether to wait interruptibly.
*/ */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
struct vmw_resource *res, bool interruptible)
{ {
struct ttm_validate_buffer val_buf; struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func; const struct vmw_res_func *func = res->func;
...@@ -1083,7 +563,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) ...@@ -1083,7 +563,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
val_buf.bo = NULL; val_buf.bo = NULL;
val_buf.shared = false; val_buf.shared = false;
ret = vmw_resource_check_buffer(res, interruptible, &val_buf); ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1098,7 +578,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) ...@@ -1098,7 +578,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true; res->backup_dirty = true;
res->res_dirty = false; res->res_dirty = false;
out_no_unbind: out_no_unbind:
vmw_resource_backoff_reservation(&val_buf); vmw_resource_backoff_reservation(ticket, &val_buf);
return ret; return ret;
} }
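For reference, the new @ticket argument gives vmw_resource_do_evict() two locking modes; below is a minimal sketch of the calling convention used by the two call sites further down in this patch (the wrapper function is illustrative only, not driver code):

/* Sketch only: the two locking modes vmw_resource_do_evict() now supports. */
static void example_eviction_modes(struct vmw_resource *evict_res)
{
	struct ww_acquire_ctx ticket;

	/* Validation path: opportunistic eviction, trylock backup buffers. */
	(void) vmw_resource_do_evict(NULL, evict_res, true);

	/*
	 * Type-wide eviction: blocking locks, using an on-stack ticket that
	 * ttm_eu_reserve_buffers() initializes and the backoff path finishes.
	 */
	(void) vmw_resource_do_evict(&ticket, evict_res, false);
}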
...@@ -1152,7 +632,8 @@ int vmw_resource_validate(struct vmw_resource *res) ...@@ -1152,7 +632,8 @@ int vmw_resource_validate(struct vmw_resource *res)
write_unlock(&dev_priv->resource_lock); write_unlock(&dev_priv->resource_lock);
ret = vmw_resource_do_evict(evict_res, true); /* Trylock backup buffers with a NULL ticket. */
ret = vmw_resource_do_evict(NULL, evict_res, true);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock); write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list); list_add_tail(&evict_res->lru_head, lru_list);
...@@ -1171,7 +652,7 @@ int vmw_resource_validate(struct vmw_resource *res) ...@@ -1171,7 +652,7 @@ int vmw_resource_validate(struct vmw_resource *res)
goto out_no_validate; goto out_no_validate;
else if (!res->func->needs_backup && res->backup) { else if (!res->func->needs_backup && res->backup) {
list_del_init(&res->mob_head); list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup); vmw_bo_unreference(&res->backup);
} }
return 0; return 0;
...@@ -1180,109 +661,39 @@ int vmw_resource_validate(struct vmw_resource *res) ...@@ -1180,109 +661,39 @@ int vmw_resource_validate(struct vmw_resource *res)
return ret; return ret;
} }
/**
* vmw_fence_single_bo - Utility function to fence a single TTM buffer
* object without unreserving it.
*
* @bo: Pointer to the struct ttm_buffer_object to fence.
* @fence: Pointer to the fence. If NULL, this function will
* insert a fence into the command stream..
*
* Contrary to the ttm_eu version of this function, it takes only
* a single buffer object instead of a list, and it also doesn't
* unreserve the buffer object, which needs to be done separately.
*/
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_bo_device *bdev = bo->bdev;
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}
/** /**
* vmw_resource_move_notify - TTM move_notify_callback * vmw_resource_unbind_list
* *
* @bo: The TTM buffer object about to move. * @vbo: Pointer to the current backing MOB.
* @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
* *
* Evicts the Guest Backed hardware resource if the backup * Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory. * buffer is being moved out of MOB memory.
* Note that this function should not race with the resource * Note that this function will not race with the resource
* validation code as long as it accesses only members of struct * validation code, since resource validation and eviction
* resource that remain static while bo::res is !NULL and * both require the backup buffer to be reserved.
* while we have @bo reserved. struct resource::backup is *not* a
* static member. The resource validation code will take care
* to set @bo::res to NULL, while having @bo reserved when the
* buffer is no longer bound to the resource, so @bo:res can be
* used to determine whether there is a need to unbind and whether
* it is safe to unbind.
*/ */
void vmw_resource_move_notify(struct ttm_buffer_object *bo, void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
struct ttm_mem_reg *mem)
{ {
struct vmw_dma_buffer *dma_buf;
if (mem == NULL)
return;
if (bo->destroy != vmw_dmabuf_bo_free &&
bo->destroy != vmw_user_dmabuf_destroy)
return;
dma_buf = container_of(bo, struct vmw_dma_buffer, base);
/*
* Kill any cached kernel maps before move. An optimization could
* be to do this iff source or destination memory type is VRAM.
*/
vmw_dma_buffer_unmap(dma_buf);
if (mem->mem_type != VMW_PL_MOB) { struct vmw_resource *res, *next;
struct vmw_resource *res, *n; struct ttm_validate_buffer val_buf = {
struct ttm_validate_buffer val_buf; .bo = &vbo->base,
.shared = false
};
val_buf.bo = bo; lockdep_assert_held(&vbo->base.resv->lock.base);
val_buf.shared = false; list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
if (!res->func->unbind)
continue;
list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { (void) res->func->unbind(res, true, &val_buf);
res->backup_dirty = true;
if (unlikely(res->func->unbind == NULL)) res->res_dirty = false;
continue; list_del_init(&res->mob_head);
(void) res->func->unbind(res, true, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
list_del_init(&res->mob_head);
}
(void) ttm_bo_wait(bo, false, false);
} }
}
/**
* vmw_resource_swap_notify - swapout notify callback.
*
* @bo: The buffer object to be swapped out.
*/
void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
{
if (bo->destroy != vmw_dmabuf_bo_free &&
bo->destroy != vmw_user_dmabuf_destroy)
return;
/* Kill any cached kernel maps before swapout */ (void) ttm_bo_wait(&vbo->base, false, false);
vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
} }
...@@ -1294,7 +705,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo) ...@@ -1294,7 +705,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
* Read back cached states from the device if they exist. This function * Read back cached states from the device if they exist. This function
* assumes binding_mutex is held. * assumes binding_mutex is held.
*/ */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{ {
struct vmw_resource *dx_query_ctx; struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
...@@ -1344,7 +755,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) ...@@ -1344,7 +755,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
void vmw_query_move_notify(struct ttm_buffer_object *bo, void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
struct vmw_dma_buffer *dx_query_mob; struct vmw_buffer_object *dx_query_mob;
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
...@@ -1353,7 +764,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, ...@@ -1353,7 +764,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
mutex_lock(&dev_priv->binding_mutex); mutex_lock(&dev_priv->binding_mutex);
dx_query_mob = container_of(bo, struct vmw_dma_buffer, base); dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) { if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex); mutex_unlock(&dev_priv->binding_mutex);
return; return;
...@@ -1368,7 +779,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, ...@@ -1368,7 +779,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
/* Create a fence and attach the BO to it */ /* Create a fence and attach the BO to it */
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
vmw_fence_single_bo(bo, fence); vmw_bo_fence_single(bo, fence);
if (fence != NULL) if (fence != NULL)
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
...@@ -1405,6 +816,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, ...@@ -1405,6 +816,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct vmw_resource *evict_res; struct vmw_resource *evict_res;
unsigned err_count = 0; unsigned err_count = 0;
int ret; int ret;
struct ww_acquire_ctx ticket;
do { do {
write_lock(&dev_priv->resource_lock); write_lock(&dev_priv->resource_lock);
...@@ -1418,7 +830,8 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, ...@@ -1418,7 +830,8 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_del_init(&evict_res->lru_head); list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock); write_unlock(&dev_priv->resource_lock);
ret = vmw_resource_do_evict(evict_res, false); /* Wait-lock backup buffers with a ticket. */
ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock); write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list); list_add_tail(&evict_res->lru_head, lru_list);
...@@ -1481,7 +894,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) ...@@ -1481,7 +894,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
goto out_no_reserve; goto out_no_reserve;
if (res->pin_count == 0) { if (res->pin_count == 0) {
struct vmw_dma_buffer *vbo = NULL; struct vmw_buffer_object *vbo = NULL;
if (res->backup) { if (res->backup) {
vbo = res->backup; vbo = res->backup;
...@@ -1539,7 +952,7 @@ void vmw_resource_unpin(struct vmw_resource *res) ...@@ -1539,7 +952,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
WARN_ON(res->pin_count == 0); WARN_ON(res->pin_count == 0);
if (--res->pin_count == 0 && res->backup) { if (--res->pin_count == 0 && res->backup) {
struct vmw_dma_buffer *vbo = res->backup; struct vmw_buffer_object *vbo = res->backup;
(void) ttm_bo_reserve(&vbo->base, false, false, NULL); (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
vmw_bo_pin_reserved(vbo, false); vmw_bo_pin_reserved(vbo, false);
......
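For context, the pin and unpin fragments above both rely on the same reserve / pin-reserved / unreserve pairing; a minimal sketch, assuming the standard TTM reserve helpers:

/* Sketch: toggling the pin count of a resource's backup while reserved. */
static void example_toggle_backup_pin(struct vmw_buffer_object *vbo, bool pin)
{
	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	vmw_bo_pin_reserved(vbo, pin);
	ttm_bo_unreserve(&vbo->base);
}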
...@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit { ...@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
SVGAFifoCmdBlitScreenToGMRFB body; SVGAFifoCmdBlitScreenToGMRFB body;
}; };
struct vmw_kms_sou_dmabuf_blit { struct vmw_kms_sou_bo_blit {
uint32 header; uint32 header;
SVGAFifoCmdBlitGMRFBToScreen body; SVGAFifoCmdBlitGMRFBToScreen body;
}; };
...@@ -83,7 +83,7 @@ struct vmw_screen_object_unit { ...@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
struct vmw_display_unit base; struct vmw_display_unit base;
unsigned long buffer_size; /**< Size of allocated buffer */ unsigned long buffer_size; /**< Size of allocated buffer */
struct vmw_dma_buffer *buffer; /**< Backing store buffer */ struct vmw_buffer_object *buffer; /**< Backing store buffer */
bool defined; bool defined;
}; };
...@@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) ...@@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
*/ */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv, static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou, struct vmw_screen_object_unit *sou,
uint32_t x, uint32_t y, int x, int y,
struct drm_display_mode *mode) struct drm_display_mode *mode)
{ {
size_t fifo_size; size_t fifo_size;
...@@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, ...@@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
cmd->obj.size.width = mode->hdisplay; cmd->obj.size.width = mode->hdisplay;
cmd->obj.size.height = mode->vdisplay; cmd->obj.size.height = mode->vdisplay;
if (sou->base.is_implicit) { cmd->obj.root.x = x;
cmd->obj.root.x = x; cmd->obj.root.y = y;
cmd->obj.root.y = y;
} else {
cmd->obj.root.x = sou->base.gui_x;
cmd->obj.root.y = sou->base.gui_y;
}
sou->base.set_gui_x = cmd->obj.root.x; sou->base.set_gui_x = cmd->obj.root.x;
sou->base.set_gui_y = cmd->obj.root.y; sou->base.set_gui_y = cmd->obj.root.y;
...@@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) ...@@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_plane_state *vps; struct vmw_plane_state *vps;
int ret; int ret;
sou = vmw_crtc_to_sou(crtc);
sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev); dev_priv = vmw_priv(crtc->dev);
ps = crtc->primary->state; ps = crtc->primary->state;
fb = ps->fb; fb = ps->fb;
vps = vmw_plane_state_to_vps(ps); vps = vmw_plane_state_to_vps(ps);
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL; vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
...@@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) ...@@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
} }
if (vfb) { if (vfb) {
sou->buffer = vps->dmabuf; struct drm_connector_state *conn_state;
sou->buffer_size = vps->dmabuf_size; struct vmw_connector_state *vmw_conn_state;
int x, y;
sou->buffer = vps->bo;
sou->buffer_size = vps->bo_size;
if (sou->base.is_implicit) {
x = crtc->x;
y = crtc->y;
} else {
conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
x = vmw_conn_state->gui_x;
y = vmw_conn_state->gui_y;
}
ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y, ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
&crtc->mode);
if (ret) if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n", DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y); crtc->x, crtc->y);
...@@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane, ...@@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc ? struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc; plane->state->crtc : old_state->crtc;
if (vps->dmabuf) if (vps->bo)
vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false); vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
vmw_dmabuf_unreference(&vps->dmabuf); vmw_bo_unreference(&vps->bo);
vps->dmabuf_size = 0; vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state); vmw_du_plane_cleanup_fb(plane, old_state);
} }
...@@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
if (!new_fb) { if (!new_fb) {
vmw_dmabuf_unreference(&vps->dmabuf); vmw_bo_unreference(&vps->bo);
vps->dmabuf_size = 0; vps->bo_size = 0;
return 0; return 0;
} }
...@@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
size = new_state->crtc_w * new_state->crtc_h * 4; size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev); dev_priv = vmw_priv(crtc->dev);
if (vps->dmabuf) { if (vps->bo) {
if (vps->dmabuf_size == size) { if (vps->bo_size == size) {
/* /*
* Note that this might temporarily up the pin-count * Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called. * to 2, until cleanup_fb() is called.
*/ */
return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, return vmw_bo_pin_in_vram(dev_priv, vps->bo,
true); true);
} }
vmw_dmabuf_unreference(&vps->dmabuf); vmw_bo_unreference(&vps->bo);
vps->dmabuf_size = 0; vps->bo_size = 0;
} }
vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL); vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
if (!vps->dmabuf) if (!vps->bo)
return -ENOMEM; return -ENOMEM;
vmw_svga_enable(dev_priv); vmw_svga_enable(dev_priv);
...@@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc. * resume the overlays, this is preferred to failing to alloc.
*/ */
vmw_overlay_pause_all(dev_priv); vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size, ret = vmw_bo_init(dev_priv, vps->bo, size,
&vmw_vram_ne_placement, &vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free); false, &vmw_bo_bo_free);
vmw_overlay_resume_all(dev_priv); vmw_overlay_resume_all(dev_priv);
if (ret) { if (ret) {
vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ vps->bo = NULL; /* vmw_bo_init frees on error */
return ret; return ret;
} }
vps->dmabuf_size = size; vps->bo_size = size;
/* /*
* TTM already thinks the buffer is pinned, but make sure the * TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped. * pin_count is upped.
*/ */
return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true); return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
} }
...@@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, ...@@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
vclips.w = crtc->mode.hdisplay; vclips.w = crtc->mode.hdisplay;
vclips.h = crtc->mode.vdisplay; vclips.h = crtc->mode.vdisplay;
if (vfb->dmabuf) if (vfb->bo)
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL, ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
&vclips, 1, 1, true, &vclips, 1, 1, true,
&fence, crtc); &fence, crtc);
else else
ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
&vclips, NULL, 0, 0, &vclips, NULL, 0, 0,
...@@ -775,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) ...@@ -775,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
return 0; return 0;
} }
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv, static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer) struct vmw_framebuffer *framebuffer)
{ {
struct vmw_dma_buffer *buf = struct vmw_buffer_object *buf =
container_of(framebuffer, struct vmw_framebuffer_dmabuf, container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer; base)->buffer;
int depth = framebuffer->base.format->depth; int depth = framebuffer->base.format->depth;
struct { struct {
...@@ -970,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ...@@ -970,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
} }
/** /**
* vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips. * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
* *
* @dirty: The closure structure. * @dirty: The closure structure.
* *
* Commits a previously built command buffer of readback clips. * Commits a previously built command buffer of readback clips.
*/ */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{ {
if (!dirty->num_hits) { if (!dirty->num_hits) {
vmw_fifo_commit(dirty->dev_priv, 0); vmw_fifo_commit(dirty->dev_priv, 0);
...@@ -984,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) ...@@ -984,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
} }
vmw_fifo_commit(dirty->dev_priv, vmw_fifo_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_dmabuf_blit) * sizeof(struct vmw_kms_sou_bo_blit) *
dirty->num_hits); dirty->num_hits);
} }
/** /**
* vmw_sou_dmabuf_clip - Callback to encode a readback cliprect. * vmw_sou_bo_clip - Callback to encode a readback cliprect.
* *
* @dirty: The closure structure * @dirty: The closure structure
* *
* Encodes a BLIT_GMRFB_TO_SCREEN cliprect. * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
*/ */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
{ {
struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd; struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
blit += dirty->num_hits; blit += dirty->num_hits;
blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
...@@ -1012,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) ...@@ -1012,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
} }
/** /**
* vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
* *
* @dev_priv: Pointer to the device private structure. * @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the dma-buffer backed framebuffer. * @framebuffer: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects. * @clips: Array of clip rects.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must * @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL. * be NULL.
...@@ -1025,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) ...@@ -1025,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a * @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which * struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized. * case the device has already synchronized.
* @crtc: If crtc is passed, perform dmabuf dirty on that crtc only. * @crtc: If crtc is passed, perform bo dirty on that crtc only.
* *
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted. * interrupted.
*/ */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer, struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips, struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips, struct drm_vmw_rect *vclips,
...@@ -1039,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, ...@@ -1039,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc) struct drm_crtc *crtc)
{ {
struct vmw_dma_buffer *buf = struct vmw_buffer_object *buf =
container_of(framebuffer, struct vmw_framebuffer_dmabuf, container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer; base)->buffer;
struct vmw_kms_dirty dirty; struct vmw_kms_dirty dirty;
int ret; int ret;
...@@ -1050,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, ...@@ -1050,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
if (ret) if (ret)
return ret; return ret;
ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer); ret = do_bo_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_revert; goto out_revert;
dirty.crtc = crtc; dirty.crtc = crtc;
dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit; dirty.fifo_commit = vmw_sou_bo_fifo_commit;
dirty.clip = vmw_sou_dmabuf_clip; dirty.clip = vmw_sou_bo_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) * dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
num_clips; num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty); 0, 0, num_clips, increment, &dirty);
...@@ -1116,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty) ...@@ -1116,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
/** /**
* vmw_kms_sou_readback - Perform a readback from the screen object system to * vmw_kms_sou_readback - Perform a readback from the screen object system to
* a dma-buffer backed framebuffer. * a buffer-object backed framebuffer.
* *
* @dev_priv: Pointer to the device private structure. * @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller. * @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL. * Must be set to NULL if @user_fence_rep is NULL.
* @vfb: Pointer to the dma-buffer backed framebuffer. * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information. * @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL. * Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects. * @vclips: Array of clip rects.
...@@ -1139,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, ...@@ -1139,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
uint32_t num_clips, uint32_t num_clips,
struct drm_crtc *crtc) struct drm_crtc *crtc)
{ {
struct vmw_dma_buffer *buf = struct vmw_buffer_object *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer; container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty; struct vmw_kms_dirty dirty;
int ret; int ret;
...@@ -1149,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, ...@@ -1149,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
if (ret) if (ret)
return ret; return ret;
ret = do_dmabuf_define_gmrfb(dev_priv, vfb); ret = do_bo_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_revert; goto out_revert;
......
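Taken together, the screen-object dirty and readback paths above follow one closure pattern: define the GMRFB, fill in a struct vmw_kms_dirty with clip and commit callbacks plus a FIFO reservation size, then hand it to vmw_kms_helper_dirty(). A condensed sketch of that pattern, mirroring the bo-dirty path rather than adding anything new:

/* Sketch: the vmw_kms_dirty closure pattern used by the SOU bo paths. */
static int example_sou_bo_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct drm_clip_rect *clips, u32 num_clips,
				struct drm_crtc *crtc)
{
	struct vmw_kms_dirty dirty = {};
	int ret;

	ret = do_bo_define_gmrfb(dev_priv, framebuffer);
	if (ret)
		return ret;

	dirty.crtc = crtc;
	dirty.clip = vmw_sou_bo_clip;               /* encode one cliprect */
	dirty.fifo_commit = vmw_sou_bo_fifo_commit; /* submit the batch    */
	dirty.fifo_reserve_size =
		sizeof(struct vmw_kms_sou_bo_blit) * num_clips;

	return vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				    0, 0, num_clips, 1, &dirty);
}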
...@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, ...@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
SVGA3dShaderType type, SVGA3dShaderType type,
uint8_t num_input_sig, uint8_t num_input_sig,
uint8_t num_output_sig, uint8_t num_output_sig,
struct vmw_dma_buffer *byte_code, struct vmw_buffer_object *byte_code,
void (*res_free) (struct vmw_resource *res)) void (*res_free) (struct vmw_resource *res))
{ {
struct vmw_shader *shader = vmw_res_to_shader(res); struct vmw_shader *shader = vmw_res_to_shader(res);
...@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, ...@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
res->backup_size = size; res->backup_size = size;
if (byte_code) { if (byte_code) {
res->backup = vmw_dmabuf_reference(byte_code); res->backup = vmw_bo_reference(byte_code);
res->backup_offset = offset; res->backup_offset = offset;
} }
shader->size = size; shader->size = size;
...@@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, ...@@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv, (void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL); &fence, NULL);
vmw_fence_single_bo(val_buf->bo, fence); vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL)) if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
...@@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res, ...@@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv, (void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL); &fence, NULL);
vmw_fence_single_bo(val_buf->bo, fence); vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL)) if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
...@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, ...@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
} }
static int vmw_user_shader_alloc(struct vmw_private *dev_priv, static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buffer, struct vmw_buffer_object *buffer,
size_t shader_size, size_t shader_size,
size_t offset, size_t offset,
SVGA3dShaderType shader_type, SVGA3dShaderType shader_type,
...@@ -801,7 +801,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, ...@@ -801,7 +801,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buffer, struct vmw_buffer_object *buffer,
size_t shader_size, size_t shader_size,
size_t offset, size_t offset,
SVGA3dShaderType shader_type) SVGA3dShaderType shader_type)
...@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, ...@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_dma_buffer *buffer = NULL; struct vmw_buffer_object *buffer = NULL;
SVGA3dShaderType shader_type; SVGA3dShaderType shader_type;
int ret; int ret;
if (buffer_handle != SVGA3D_INVALID_ID) { if (buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, ret = vmw_user_bo_lookup(tfile, buffer_handle,
&buffer, NULL); &buffer, NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader " DRM_ERROR("Could not find buffer for shader "
...@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, ...@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
ttm_read_unlock(&dev_priv->reservation_sem); ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg: out_bad_arg:
vmw_dmabuf_unreference(&buffer); vmw_bo_unreference(&buffer);
return ret; return ret;
} }
...@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, ...@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct list_head *list) struct list_head *list)
{ {
struct ttm_operation_ctx ctx = { false, true }; struct ttm_operation_ctx ctx = { false, true };
struct vmw_dma_buffer *buf; struct vmw_buffer_object *buf;
struct ttm_bo_kmap_obj map; struct ttm_bo_kmap_obj map;
bool is_iomem; bool is_iomem;
int ret; int ret;
...@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, ...@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(!buf)) if (unlikely(!buf))
return -ENOMEM; return -ENOMEM;
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
true, vmw_dmabuf_bo_free); true, vmw_bo_bo_free);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;
...@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, ...@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
res, list); res, list);
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
no_reserve: no_reserve:
vmw_dmabuf_unreference(&buf); vmw_bo_unreference(&buf);
out: out:
return ret; return ret;
} }
......
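The compat-shader path above also shows the renamed allocation pattern in one place: kzalloc the vmw_buffer_object, call vmw_bo_init() with a placement and a destroy callback, and remember that vmw_bo_init() frees the object itself on error. A minimal sketch; the wrapper name and the ERR_PTR convention are illustrative:

/* Sketch: allocating a system-placed buffer object with the new helpers. */
static struct vmw_buffer_object *
example_alloc_sys_bo(struct vmw_private *dev_priv, size_t size)
{
	struct vmw_buffer_object *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
			  true, vmw_bo_bo_free);
	if (ret)
		return ERR_PTR(ret);	/* vmw_bo_init() freed buf already. */

	return buf;
}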
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
enum stdu_content_type { enum stdu_content_type {
SAME_AS_DISPLAY = 0, SAME_AS_DISPLAY = 0,
SEPARATE_SURFACE, SEPARATE_SURFACE,
SEPARATE_DMA SEPARATE_BO
}; };
/** /**
...@@ -58,7 +58,7 @@ enum stdu_content_type { ...@@ -58,7 +58,7 @@ enum stdu_content_type {
* @bottom: Bottom side of bounding box. * @bottom: Bottom side of bounding box.
* @fb_left: Left side of the framebuffer/content bounding box * @fb_left: Left side of the framebuffer/content bounding box
* @fb_top: Top of the framebuffer/content bounding box * @fb_top: Top of the framebuffer/content bounding box
* @buf: DMA buffer when DMA-ing between buffer and screen targets. * @buf: buffer object when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets. * @sid: Surface ID when copying between surface and screen targets.
*/ */
struct vmw_stdu_dirty { struct vmw_stdu_dirty {
...@@ -68,7 +68,7 @@ struct vmw_stdu_dirty { ...@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
s32 fb_left, fb_top; s32 fb_left, fb_top;
u32 pitch; u32 pitch;
union { union {
struct vmw_dma_buffer *buf; struct vmw_buffer_object *buf;
u32 sid; u32 sid;
}; };
}; };
...@@ -178,13 +178,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv, ...@@ -178,13 +178,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
cmd->body.height = mode->vdisplay; cmd->body.height = mode->vdisplay;
cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0; cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
cmd->body.dpi = 0; cmd->body.dpi = 0;
if (stdu->base.is_implicit) { cmd->body.xRoot = crtc_x;
cmd->body.xRoot = crtc_x; cmd->body.yRoot = crtc_y;
cmd->body.yRoot = crtc_y;
} else {
cmd->body.xRoot = stdu->base.gui_x;
cmd->body.yRoot = stdu->base.gui_y;
}
stdu->base.set_gui_x = cmd->body.xRoot; stdu->base.set_gui_x = cmd->body.xRoot;
stdu->base.set_gui_y = cmd->body.yRoot; stdu->base.set_gui_y = cmd->body.yRoot;
...@@ -374,11 +370,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc) ...@@ -374,11 +370,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{ {
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu; struct vmw_screen_target_display_unit *stdu;
int ret; struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
int x, y, ret;
stdu = vmw_crtc_to_stdu(crtc); stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev); dev_priv = vmw_priv(crtc->dev);
conn_state = stdu->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
if (stdu->defined) { if (stdu->defined) {
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL); ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
...@@ -397,8 +396,16 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc) ...@@ -397,8 +396,16 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable) if (!crtc->state->enable)
return; return;
if (stdu->base.is_implicit) {
x = crtc->x;
y = crtc->y;
} else {
x = vmw_conn_state->gui_x;
y = vmw_conn_state->gui_y;
}
vmw_svga_enable(dev_priv); vmw_svga_enable(dev_priv);
ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, crtc->x, crtc->y); ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
if (ret) if (ret)
DRM_ERROR("Failed to define Screen Target of size %dx%d\n", DRM_ERROR("Failed to define Screen Target of size %dx%d\n",
...@@ -508,14 +515,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc, ...@@ -508,14 +515,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
/** /**
* vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
* *
* @dirty: The closure structure. * @dirty: The closure structure.
* *
* Encodes a surface DMA command cliprect and updates the bounding box * Encodes a surface DMA command cliprect and updates the bounding box
* for the DMA. * for the DMA.
*/ */
static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty) static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
{ {
struct vmw_stdu_dirty *ddirty = struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base); container_of(dirty, struct vmw_stdu_dirty, base);
...@@ -543,14 +550,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty) ...@@ -543,14 +550,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
} }
/** /**
* vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command. * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
* *
* @dirty: The closure structure. * @dirty: The closure structure.
* *
* Fills in the missing fields in a DMA command, and optionally encodes * Fills in the missing fields in a DMA command, and optionally encodes
* a screen target update command, depending on transfer direction. * a screen target update command, depending on transfer direction.
*/ */
static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{ {
struct vmw_stdu_dirty *ddirty = struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base); container_of(dirty, struct vmw_stdu_dirty, base);
...@@ -594,13 +601,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) ...@@ -594,13 +601,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
/** /**
* vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
* *
* @dirty: The closure structure. * @dirty: The closure structure.
* *
* This function calculates the bounding box for all the incoming clips. * This function calculates the bounding box for all the incoming clips.
*/ */
static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
{ {
struct vmw_stdu_dirty *ddirty = struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base); container_of(dirty, struct vmw_stdu_dirty, base);
...@@ -624,14 +631,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) ...@@ -624,14 +631,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
/** /**
* vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from buffer object
* *
* @dirty: The closure structure. * @dirty: The closure structure.
* *
* For the special case when we cannot create a proxy surface in a * For the special case when we cannot create a proxy surface in a
* 2D VM, we have to do a CPU blit ourselves. * 2D VM, we have to do a CPU blit ourselves.
*/ */
static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
{ {
struct vmw_stdu_dirty *ddirty = struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base); container_of(dirty, struct vmw_stdu_dirty, base);
...@@ -652,7 +659,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) ...@@ -652,7 +659,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0) if (width == 0 || height == 0)
return; return;
/* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */ /* Assume we are blitting from Guest (bo) to Host (display_srf) */
dst_pitch = stdu->display_srf->base_size.width * stdu->cpp; dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base; dst_bo = &stdu->display_srf->res.backup->base;
dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp; dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
...@@ -712,13 +719,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) ...@@ -712,13 +719,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
} }
/** /**
* vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
* framebuffer and the screen target system. * framebuffer and the screen target system.
* *
* @dev_priv: Pointer to the device private structure. * @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm-file identifying the caller. May be * @file_priv: Pointer to a struct drm-file identifying the caller. May be
* set to NULL, but then @user_fence_rep must also be set to NULL. * set to NULL, but then @user_fence_rep must also be set to NULL.
* @vfb: Pointer to the dma-buffer backed framebuffer. * @vfb: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL. * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must * @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL. * be NULL.
...@@ -747,8 +754,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, ...@@ -747,8 +754,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible, bool interruptible,
struct drm_crtc *crtc) struct drm_crtc *crtc)
{ {
struct vmw_dma_buffer *buf = struct vmw_buffer_object *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer; container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_stdu_dirty ddirty; struct vmw_stdu_dirty ddirty;
int ret; int ret;
bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D); bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
...@@ -770,8 +777,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, ...@@ -770,8 +777,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
ddirty.fb_left = ddirty.fb_top = S32_MAX; ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0]; ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf; ddirty.buf = buf;
ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
ddirty.base.clip = vmw_stdu_dmabuf_clip; ddirty.base.clip = vmw_stdu_bo_clip;
ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) + ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
num_clips * sizeof(SVGA3dCopyBox) + num_clips * sizeof(SVGA3dCopyBox) +
sizeof(SVGA3dCmdSurfaceDMASuffix); sizeof(SVGA3dCmdSurfaceDMASuffix);
...@@ -780,8 +787,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, ...@@ -780,8 +787,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
if (cpu_blit) { if (cpu_blit) {
ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit; ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip; ddirty.base.clip = vmw_stdu_bo_cpu_clip;
ddirty.base.fifo_reserve_size = 0; ddirty.base.fifo_reserve_size = 0;
} }
...@@ -927,7 +934,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, ...@@ -927,7 +934,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (ret) if (ret)
return ret; return ret;
if (vfbs->is_dmabuf_proxy) { if (vfbs->is_bo_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc); ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret) if (ret)
goto out_finish; goto out_finish;
...@@ -1075,7 +1082,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane, ...@@ -1075,7 +1082,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
* @new_state: info on the new plane state, including the FB * @new_state: info on the new plane state, including the FB
* *
* This function allocates a new display surface if the content is * This function allocates a new display surface if the content is
* backed by a DMA. The display surface is pinned here, and it'll * backed by a buffer object. The display surface is pinned here, and it'll
* be unpinned in .cleanup_fb() * be unpinned in .cleanup_fb()
* *
* Returns 0 on success * Returns 0 on success
...@@ -1105,13 +1112,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -1105,13 +1112,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
} }
vfb = vmw_framebuffer_to_vfb(new_fb); vfb = vmw_framebuffer_to_vfb(new_fb);
new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb); new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay && if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
new_vfbs->surface->base_size.height == vdisplay) new_vfbs->surface->base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY; new_content_type = SAME_AS_DISPLAY;
else if (vfb->dmabuf) else if (vfb->bo)
new_content_type = SEPARATE_DMA; new_content_type = SEPARATE_BO;
else else
new_content_type = SEPARATE_SURFACE; new_content_type = SEPARATE_SURFACE;
...@@ -1124,10 +1131,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -1124,10 +1131,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
display_base_size.depth = 1; display_base_size.depth = 1;
/* /*
* If content buffer is a DMA buf, then we have to construct * If content buffer is a buffer object, then we have to
* surface info * construct surface info
*/ */
if (new_content_type == SEPARATE_DMA) { if (new_content_type == SEPARATE_BO) {
switch (new_fb->format->cpp[0]*8) { switch (new_fb->format->cpp[0]*8) {
case 32: case 32:
...@@ -1212,12 +1219,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -1212,12 +1219,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vps->content_fb_type = new_content_type; vps->content_fb_type = new_content_type;
/* /*
* This should only happen if the DMA buf is too large to create a * This should only happen if the buffer object is too large to create a
* proxy surface for. * proxy surface for.
* If we are a 2D VM with a DMA buffer then we have to use CPU blit * If we are a 2D VM with a buffer object then we have to use CPU blit
* so cache these mappings * so cache these mappings
*/ */
if (vps->content_fb_type == SEPARATE_DMA && if (vps->content_fb_type == SEPARATE_BO &&
!(dev_priv->capabilities & SVGA_CAP_3D)) !(dev_priv->capabilities & SVGA_CAP_3D))
vps->cpp = new_fb->pitches[0] / new_fb->width; vps->cpp = new_fb->pitches[0] / new_fb->width;
...@@ -1276,7 +1283,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, ...@@ -1276,7 +1283,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
if (ret) if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n"); DRM_ERROR("Failed to bind surface to STDU.\n");
if (vfb->dmabuf) if (vfb->bo)
ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
&vclips, 1, 1, true, false, &vclips, 1, 1, true, false,
crtc); crtc);
......
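The buffer-object update path above picks between the FIFO DMA callbacks and the CPU-blit callbacks based on SVGA_CAP_3D; a condensed sketch of that selection, mirroring vmw_kms_stdu_dma() (illustrative only):

/* Sketch: 2D VMs (no SVGA_CAP_3D) fall back to a CPU blit for bo content. */
static void example_pick_stdu_bo_callbacks(struct vmw_private *dev_priv,
					   struct vmw_stdu_dirty *ddirty,
					   u32 num_clips)
{
	bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);

	if (cpu_blit) {
		ddirty->base.fifo_commit = vmw_stdu_bo_cpu_commit;
		ddirty->base.clip = vmw_stdu_bo_cpu_clip;
		ddirty->base.fifo_reserve_size = 0;
	} else {
		ddirty->base.fifo_commit = vmw_stdu_bo_fifo_commit;
		ddirty->base.clip = vmw_stdu_bo_clip;
		ddirty->base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
			num_clips * sizeof(SVGA3dCopyBox) +
			sizeof(SVGA3dCmdSurfaceDMASuffix);
	}
}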
...@@ -468,7 +468,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res, ...@@ -468,7 +468,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv, (void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL); &fence, NULL);
vmw_fence_single_bo(val_buf->bo, fence); vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL)) if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
...@@ -842,12 +842,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -842,12 +842,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) { if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle; uint32_t backup_handle;
ret = vmw_user_dmabuf_alloc(dev_priv, tfile, ret = vmw_user_bo_alloc(dev_priv, tfile,
res->backup_size, res->backup_size,
true, true,
&backup_handle, &backup_handle,
&res->backup, &res->backup,
&user_srf->backup_base); &user_srf->backup_base);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
goto out_unlock; goto out_unlock;
...@@ -1072,7 +1072,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) ...@@ -1072,7 +1072,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd2->header.size = cmd_len; cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id; cmd2->body.sid = srf->res.id;
cmd2->body.surfaceFlags = srf->flags; cmd2->body.surfaceFlags = srf->flags;
cmd2->body.format = cpu_to_le32(srf->format); cmd2->body.format = srf->format;
cmd2->body.numMipLevels = srf->mip_levels[0]; cmd2->body.numMipLevels = srf->mip_levels[0];
cmd2->body.multisampleCount = srf->multisample_count; cmd2->body.multisampleCount = srf->multisample_count;
cmd2->body.autogenFilter = srf->autogen_filter; cmd2->body.autogenFilter = srf->autogen_filter;
...@@ -1085,7 +1085,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) ...@@ -1085,7 +1085,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd->header.size = cmd_len; cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id; cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags; cmd->body.surfaceFlags = srf->flags;
cmd->body.format = cpu_to_le32(srf->format); cmd->body.format = srf->format;
cmd->body.numMipLevels = srf->mip_levels[0]; cmd->body.numMipLevels = srf->mip_levels[0];
cmd->body.multisampleCount = srf->multisample_count; cmd->body.multisampleCount = srf->multisample_count;
cmd->body.autogenFilter = srf->autogen_filter; cmd->body.autogenFilter = srf->autogen_filter;
...@@ -1210,7 +1210,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, ...@@ -1210,7 +1210,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv, (void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL); &fence, NULL);
vmw_fence_single_bo(val_buf->bo, fence); vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL)) if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
...@@ -1317,14 +1317,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1317,14 +1317,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
if (req->buffer_handle != SVGA3D_INVALID_ID) { if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, ret = vmw_user_bo_lookup(tfile, req->buffer_handle,
&res->backup, &res->backup,
&user_srf->backup_base); &user_srf->backup_base);
if (ret == 0) { if (ret == 0) {
if (res->backup->base.num_pages * PAGE_SIZE < if (res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) { res->backup_size) {
DRM_ERROR("Surface backup buffer is too small.\n"); DRM_ERROR("Surface backup buffer is too small.\n");
vmw_dmabuf_unreference(&res->backup); vmw_bo_unreference(&res->backup);
ret = -EINVAL; ret = -EINVAL;
goto out_unlock; goto out_unlock;
} else { } else {
...@@ -1332,13 +1332,13 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1332,13 +1332,13 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
} }
} }
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile, ret = vmw_user_bo_alloc(dev_priv, tfile,
res->backup_size, res->backup_size,
req->drm_surface_flags & req->drm_surface_flags &
drm_vmw_surface_flag_shareable, drm_vmw_surface_flag_shareable,
&backup_handle, &backup_handle,
&res->backup, &res->backup,
&user_srf->backup_base); &user_srf->backup_base);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
...@@ -1414,8 +1414,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -1414,8 +1414,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
} }
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
&backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
......
...@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) ...@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
struct ttm_object_file *tfile = struct ttm_object_file *tfile =
vmw_fpriv((struct drm_file *)filp->private_data)->tfile; vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
return vmw_user_dmabuf_verify_access(bo, tfile); return vmw_user_bo_verify_access(bo, tfile);
} }
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
...@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, ...@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
bool evict, bool evict,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
vmw_resource_move_notify(bo, mem); vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem); vmw_query_move_notify(bo, mem);
} }
...@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, ...@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/ */
static void vmw_swap_notify(struct ttm_buffer_object *bo) static void vmw_swap_notify(struct ttm_buffer_object *bo)
{ {
vmw_resource_swap_notify(bo); vmw_bo_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false); (void) ttm_bo_wait(bo, false, false);
} }
......
...@@ -40,6 +40,7 @@ extern "C" { ...@@ -40,6 +40,7 @@ extern "C" {
#define DRM_VMW_GET_PARAM 0 #define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1 #define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2 #define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2 #define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3 #define DRM_VMW_CURSOR_BYPASS 3
...@@ -356,9 +357,9 @@ struct drm_vmw_fence_rep { ...@@ -356,9 +357,9 @@ struct drm_vmw_fence_rep {
/*************************************************************************/ /*************************************************************************/
/** /**
* DRM_VMW_ALLOC_DMABUF * DRM_VMW_ALLOC_BO
* *
* Allocate a DMA buffer that is visible also to the host. * Allocate a buffer object that is visible also to the host.
* NOTE: The buffer is * NOTE: The buffer is
* identified by a handle and an offset, which are private to the guest, but * identified by a handle and an offset, which are private to the guest, but
* usable in the command stream. The guest kernel may translate these * usable in the command stream. The guest kernel may translate these
...@@ -366,27 +367,28 @@ struct drm_vmw_fence_rep { ...@@ -366,27 +367,28 @@ struct drm_vmw_fence_rep {
* be zero at all times, or it may disappear from the interface before it is * be zero at all times, or it may disappear from the interface before it is
* fixed. * fixed.
* *
* The DMA buffer may stay user-space mapped in the guest at all times, * The buffer object may stay user-space mapped in the guest at all times,
* and is thus suitable for sub-allocation. * and is thus suitable for sub-allocation.
* *
* DMA buffers are mapped using the mmap() syscall on the drm device. * Buffer objects are mapped using the mmap() syscall on the drm device.
*/ */
/** /**
* struct drm_vmw_alloc_dmabuf_req * struct drm_vmw_alloc_bo_req
* *
* @size: Required minimum size of the buffer. * @size: Required minimum size of the buffer.
* *
* Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. * Input data to the DRM_VMW_ALLOC_BO Ioctl.
*/ */
struct drm_vmw_alloc_dmabuf_req { struct drm_vmw_alloc_bo_req {
__u32 size; __u32 size;
__u32 pad64; __u32 pad64;
}; };
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
/** /**
* struct drm_vmw_dmabuf_rep * struct drm_vmw_bo_rep
* *
* @map_handle: Offset to use in the mmap() call used to map the buffer. * @map_handle: Offset to use in the mmap() call used to map the buffer.
* @handle: Handle unique to this buffer. Used for unreferencing. * @handle: Handle unique to this buffer. Used for unreferencing.
...@@ -395,50 +397,32 @@ struct drm_vmw_alloc_dmabuf_req { ...@@ -395,50 +397,32 @@ struct drm_vmw_alloc_dmabuf_req {
* @cur_gmr_offset: Offset to use in the command stream when this buffer is * @cur_gmr_offset: Offset to use in the command stream when this buffer is
* referenced. See note above. * referenced. See note above.
* *
* Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. * Output data from the DRM_VMW_ALLOC_BO Ioctl.
*/ */
struct drm_vmw_dmabuf_rep { struct drm_vmw_bo_rep {
__u64 map_handle; __u64 map_handle;
__u32 handle; __u32 handle;
__u32 cur_gmr_id; __u32 cur_gmr_id;
__u32 cur_gmr_offset; __u32 cur_gmr_offset;
__u32 pad64; __u32 pad64;
}; };
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
/** /**
* union drm_vmw_dmabuf_arg * union drm_vmw_alloc_bo_arg
* *
* @req: Input data as described above. * @req: Input data as described above.
* @rep: Output data as described above. * @rep: Output data as described above.
* *
* Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. * Argument to the DRM_VMW_ALLOC_BO Ioctl.
*/ */
union drm_vmw_alloc_dmabuf_arg { union drm_vmw_alloc_bo_arg {
struct drm_vmw_alloc_dmabuf_req req; struct drm_vmw_alloc_bo_req req;
struct drm_vmw_dmabuf_rep rep; struct drm_vmw_bo_rep rep;
};
/*************************************************************************/
/**
* DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
*
*/
/**
* struct drm_vmw_unref_dmabuf_arg
*
* @handle: Handle indicating what buffer to free. Obtained from the
* DRM_VMW_ALLOC_DMABUF Ioctl.
*
* Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
*/
struct drm_vmw_unref_dmabuf_arg {
__u32 handle;
__u32 pad64;
}; };
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
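
To make the renamed interface concrete, here is a minimal user-space sketch (not part of the patch) that allocates a buffer object through DRM_VMW_ALLOC_BO and maps it with mmap() on the DRM device, as the comment block above describes. The request macro is assembled locally from DRM_COMMAND_BASE + DRM_VMW_ALLOC_BO, and the device-node path and buffer size are placeholders; real code would use whatever request definition and node its DRM stack provides, and the include paths assume installed kernel uapi headers.

/* Illustrative only: request macro, device path and size are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

#define VMW_IOCTL_ALLOC_BO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_BO, union drm_vmw_alloc_bo_arg)

int main(void)
{
	union drm_vmw_alloc_bo_arg arg;
	void *map;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return 1;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = 4096;			/* required minimum size */

	if (ioctl(fd, VMW_IOCTL_ALLOC_BO, &arg)) {
		close(fd);
		return 1;
	}

	/* rep.map_handle is the offset to pass to mmap(), per the doc above. */
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, arg.rep.map_handle);
	if (map != MAP_FAILED) {
		printf("bo handle %u, gmr id %u, gmr offset %u\n",
		       arg.rep.handle, arg.rep.cur_gmr_id,
		       arg.rep.cur_gmr_offset);
		munmap(map, 4096);
	}

	close(fd);
	return 0;
}

Note that the #define aliases added in this hunk keep old sources that still spell drm_vmw_alloc_dmabuf_req, drm_vmw_dmabuf_rep or drm_vmw_alloc_dmabuf_arg compiling against the renamed types.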
 /*************************************************************************/
 /**
@@ -1103,9 +1087,8 @@ union drm_vmw_extended_context_arg {
  * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
  * underlying resource.
  *
- * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
- * The ioctl arguments therefore need to be identical in layout.
- *
+ * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
+ * Ioctl.
  */
 /**
@@ -1119,7 +1102,7 @@ struct drm_vmw_handle_close_arg {
 	__u32 handle;
 	__u32 pad64;
 };
+#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
 #if defined(__cplusplus)
 }
...
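
Freeing such a handle now goes through DRM_VMW_HANDLE_CLOSE, which the hunk above overlays on the deprecated DRM_VMW_UNREF_DMABUF ioctl. A minimal sketch, again assembling the request number locally (the DRM_IOW direction is an assumption, not taken from the patch), might look like this:

/* Illustrative sketch: the request macro is assembled locally. */
#include <sys/ioctl.h>

#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

#define VMW_IOCTL_HANDLE_CLOSE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_HANDLE_CLOSE, \
		struct drm_vmw_handle_close_arg)

/* Release a handle previously returned by DRM_VMW_ALLOC_BO (or any other
 * user-space handle the driver hands out); returns 0 on success. */
int vmw_close_handle(int fd, __u32 handle)
{
	struct drm_vmw_handle_close_arg arg = {
		.handle = handle,
	};

	return ioctl(fd, VMW_IOCTL_HANDLE_CLOSE, &arg);
}

Because struct drm_vmw_handle_close_arg has the same layout as the old struct drm_vmw_unref_dmabuf_arg, and both ioctls share the same command number, existing user space that still issues DRM_VMW_UNREF_DMABUF should keep working unchanged.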