Commit e546e281 authored by Tina Zhang, committed by Zhenyu Wang

drm/i915/gvt: Dmabuf support for GVT-g

This patch introduces a guest framebuffer sharing mechanism based on the
dma-buf subsystem. With this mechanism, a guest's framebuffer can be
shared between the guest VM and the host.
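As a rough usage illustration (not part of this patch; the example_* names are
hypothetical, error handling is trimmed, and the plane-type value is assumed to
mirror the kernel's enum drm_plane_type), a host-side consumer such as QEMU
could query the guest's primary plane and turn it into a dma-buf fd like this,
given the VFIO_DEVICE_QUERY_GFX_PLANE/VFIO_DEVICE_GET_GFX_DMABUF definitions
from <linux/vfio.h>:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Assumed to mirror enum drm_plane_type (DRM_PLANE_TYPE_PRIMARY == 1). */
#define EXAMPLE_DRM_PLANE_TYPE_PRIMARY 1

/* Query the guest's primary plane and export it as a dma-buf fd. */
static int example_get_guest_fb(int vfio_device_fd)
{
	struct vfio_device_gfx_plane_info plane = {
		.argsz = sizeof(plane),
		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
		.drm_plane_type = EXAMPLE_DRM_PLANE_TYPE_PRIMARY,
	};
	int dmabuf_fd;

	if (ioctl(vfio_device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane) < 0)
		return -1;

	/* plane.dmabuf_id identifies the currently decoded framebuffer. */
	dmabuf_fd = ioctl(vfio_device_fd, VFIO_DEVICE_GET_GFX_DMABUF,
			  &plane.dmabuf_id);
	if (dmabuf_fd < 0)
		return -1;

	printf("guest fb: %ux%u, format 0x%x, dma-buf fd %d\n",
	       plane.width, plane.height, plane.drm_format, dmabuf_fd);
	return dmabuf_fd;
}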

v17:
- modify VFIO_DEVICE_GET_GFX_DMABUF interface. (Alex)

v16:
- add x_hot and y_hot. (Gerd)
- add flag validation for VFIO_DEVICE_GET_GFX_DMABUF. (Alex)
- rebase 4.14.0-rc6.

v15:
- add VFIO_DEVICE_GET_GFX_DMABUF ABI. (Gerd)
- add intel_vgpu_dmabuf_cleanup() to clean up the vGPU's dmabuf. (Gerd)

v14:
- add PROBE, DMABUF and REGION flags. (Alex)

v12:
- refine the lifecycle of dmabuf.

v9:
- remove dma-buf management. (Alex)
- track the dma-buf create and release in kernel mode. (Gerd) (Daniel)

v8:
- refine the dma-buf ioctl definition. (Alex)
- add a lock to protect the dmabuf list. (Alex)

v7:
- release dma-buf related allocations in dma-buf's associated release
  function. (Alex)
- refine the ioctl interface for querying plane info or creating a dma-buf.
  (Alex)

v6:
- align the dma-buf life cycle with the vfio device. (Alex)
- add the dma-buf related operations in a separate patch. (Gerd)
- i915 related changes. (Chris)

v5:
- fix a bug in checking whether the gem obj is gvt's dma-buf when the user
  changes caching mode or domains. Add a helper function to do it.
  (Xiaoguang)
- add definition for the query plane and create dma-buf. (Xiaoguang)

v4:
- fix a bug in checking whether the gem obj is gvt's dma-buf when setting
  caching mode or domains. (Xiaoguang)

v3:
- declare a new flag I915_GEM_OBJECT_IS_GVT_DMABUF in drm_i915_gem_object
  to represent the gem obj for gvt's dma-buf. The tiling mode, caching
  mode and domains cannot be changed for this kind of gem object. (Alex)
- change dma-buf related information to be more generic, so other vendors
  can use the same interface. (Alex)

v2:
- create a management fd for dma-buf operations. (Alex)
- alloc gem object's backing storage in gem obj's get_pages() callback.
  (Chris)
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent e20eaa23
@@ -2,7 +2,7 @@ GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o \
-	fb_decoder.o
+	fb_decoder.o dmabuf.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
(Collapsed diff not shown; presumably the new gvt/dmabuf.c implementation added by this patch.)
/*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Xiaoguang Chen
* Tina Zhang <tina.zhang@intel.com>
*/
#ifndef _GVT_DMABUF_H_
#define _GVT_DMABUF_H_
#include <linux/vfio.h>
struct intel_vgpu_fb_info {
__u64 start; /* start address of plane */
__u64 start_gpa; /* guest physical address of plane start */
__u64 drm_format_mod; /* drm format modifier of plane */
__u32 drm_format; /* drm format of plane */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
__u32 size; /* size of plane in bytes, align on page */
__u32 x_pos; /* horizontal position of cursor plane */
__u32 y_pos; /* vertical position of cursor plane */
__u32 x_hot; /* horizontal position of cursor hotspot */
__u32 y_hot; /* vertical position of cursor hotspot */
struct intel_vgpu_dmabuf_obj *obj;
};
/**
 * struct intel_vgpu_dmabuf_obj - Intel vGPU device buffer object
 * @vgpu: the vGPU that owns this dma-buf object
 * @info: decoded framebuffer information exposed through this dma-buf
 * @dmabuf_id: id handed to userspace to identify this dma-buf
 * @kref: reference count of this object
 * @initref: whether the initial creation reference is still held
 * @list: node in the vGPU's dmabuf_obj_list_head
 */
struct intel_vgpu_dmabuf_obj {
struct intel_vgpu *vgpu;
struct intel_vgpu_fb_info *info;
__u32 dmabuf_id;
struct kref kref;
bool initref;
struct list_head list;
};
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
#endif
@@ -181,6 +181,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
.get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
};
/**
@@ -47,6 +47,7 @@
#include "render.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#define GVT_MAX_VGPU 8
@@ -209,8 +210,16 @@ struct intel_vgpu {
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
struct vfio_device *vfio_device;
} vdev;
#endif
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
struct completion vblank_done;
};
/* validating GM healthy status */
@@ -536,6 +545,8 @@ struct intel_gvt_ops {
const char *name);
bool (*get_gvt_attrs)(struct attribute ***type_attrs,
struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
};
@@ -56,6 +56,8 @@ struct intel_gvt_mpt {
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
int (*set_opregion)(void *vgpu);
int (*get_vfio_device)(void *vgpu);
void (*put_vfio_device)(void *vgpu);
};
extern struct intel_gvt_mpt xengt_mpt;
@@ -377,10 +377,23 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
vgpu->vdev.num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
vgpu->vdev.vfio_device = vfio_device_get_from_dev(
mdev_dev(vgpu->vdev.mdev));
if (!vgpu->vdev.vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
return 0;
}
static int kvmgt_set_opregion(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
@@ -409,6 +422,14 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
static void kvmgt_put_vfio_device(void *vgpu)
{
if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
return;
vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
@@ -1146,6 +1167,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_RESET) {
intel_gvt_ops->vgpu_reset(vgpu);
return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf;
int ret = 0;
minsz = offsetofend(struct vfio_device_gfx_plane_info,
dmabuf_id);
if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
return -EFAULT;
if (dmabuf.argsz < minsz)
return -EINVAL;
ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0)
return ret;
return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id;
__s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
return dmabuf_fd;
}
return 0;
@@ -1387,6 +1435,9 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
mutex_init(&vgpu->dmabuf_lock);
init_completion(&vgpu->vblank_done);
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1528,6 +1579,8 @@ struct intel_gvt_mpt kvmgt_mpt = {
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
.set_opregion = kvmgt_set_opregion,
.get_vfio_device = kvmgt_get_vfio_device,
.put_vfio_device = kvmgt_put_vfio_device,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
@@ -309,4 +309,34 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
return intel_gvt_host.mpt->set_opregion(vgpu);
}
/**
* intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->get_vfio_device)
return 0;
return intel_gvt_host.mpt->get_vfio_device(vgpu);
}
/**
 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->put_vfio_device)
return;
intel_gvt_host.mpt->put_vfio_device(vgpu);
}
#endif /* _GVT_MPT_H_ */
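For context only, a minimal sketch of the intended pairing of these wrappers
(the real caller lives in the collapsed gvt/dmabuf.c; the example_* functions
below are hypothetical): a vfio device reference is taken when a dma-buf is
exported for a vGPU and dropped when that dma-buf object is released, so the
vfio device cannot go away while the host still maps the buffer.

/* Hypothetical export path: pin the vfio device for the dma-buf's lifetime. */
static int example_export_dmabuf(struct intel_vgpu *vgpu)
{
	int ret = intel_gvt_hypervisor_get_vfio_device(vgpu);

	if (ret)
		return ret;
	/* ... build the GEM object and export the dma-buf here ... */
	return 0;
}

/* Hypothetical release path: balances the reference taken on export. */
static void example_release_dmabuf(struct intel_vgpu *vgpu)
{
	intel_gvt_hypervisor_put_vfio_device(vgpu);
}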
@@ -236,6 +236,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
}
intel_vgpu_stop_schedule(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&gvt->lock);
}
@@ -265,6 +266,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -349,7 +351,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
ret = intel_vgpu_init_mmio(vgpu);
@@ -261,6 +261,8 @@ struct drm_i915_gem_object {
} userptr;
unsigned long scratch;
void *gvt_info;
};
/** for phys allocated objects */