Commit 0d7e76be authored by Rodrigo Vivi

Merge tag 'gvt-next-2017-12-05' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2017-12-05

- VFIO mdev display dmabuf interface and gvt support (Tina); see the userspace usage sketch below the commit metadata
- VFIO mdev opregion support/fixes (Tina/Xiong/Chris)
- workload scheduling optimization (Changbin)
- preemption fix and temporary workaround (Zhenyu)
- and misc fixes after refactor (Chris)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171205032629.vylemph57toipeax@zhen-hp.sh.intel.com
parents 2abf3c0d 1603660b
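
The dmabuf items above land as two new VFIO ioctls, VFIO_DEVICE_QUERY_GFX_PLANE and VFIO_DEVICE_GET_GFX_DMABUF, handled in intel_vgpu_ioctl() further down. As a hedged illustration only (not part of this series), the sketch below shows how a userspace consumer could drive them. It assumes device_fd is an already open VFIO device fd for the kvmgt mdev (the container/group setup is omitted), that the UAPI headers defining struct vfio_device_gfx_plane_info are available, and GVT_SAMPLE_PLANE_PRIMARY is a local stand-in for the kernel's DRM_PLANE_TYPE_PRIMARY value.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Mirrors enum drm_plane_type in the kernel: OVERLAY=0, PRIMARY=1, CURSOR=2 */
#define GVT_SAMPLE_PLANE_PRIMARY 1

/* Query the vGPU's primary plane and export it as a dma-buf fd. */
static int get_primary_plane_dmabuf(int device_fd)
{
	struct vfio_device_gfx_plane_info plane;
	__u32 dmabuf_id;
	int dmabuf_fd;

	memset(&plane, 0, sizeof(plane));
	plane.argsz = sizeof(plane);
	plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF;
	plane.drm_plane_type = GVT_SAMPLE_PLANE_PRIMARY;

	/* Handled by intel_vgpu_query_plane(): fills format/size/dmabuf_id. */
	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane) < 0) {
		perror("VFIO_DEVICE_QUERY_GFX_PLANE");
		return -1;
	}
	if (!plane.size)
		return -1;	/* plane not ready yet; poll again later */

	/* Handled by intel_vgpu_get_dmabuf(): the ioctl return is the fd. */
	dmabuf_id = plane.dmabuf_id;
	dmabuf_fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &dmabuf_id);
	if (dmabuf_fd < 0) {
		perror("VFIO_DEVICE_GET_GFX_DMABUF");
		return -1;
	}

	printf("plane %ux%u stride %u drm_format 0x%x -> dmabuf fd %d\n",
	       plane.width, plane.height, plane.stride,
	       plane.drm_format, dmabuf_fd);
	return dmabuf_fd;
}

A consumer such as QEMU typically probes first with VFIO_GFX_PLANE_TYPE_DMABUF | VFIO_GFX_PLANE_TYPE_PROBE (which intel_vgpu_query_plane() short-circuits to success) and then repeats the query/get pair per frame.
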
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
-	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
+	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o \
+	fb_decoder.o dmabuf.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
...
@@ -335,7 +335,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
-		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+		ret = intel_vgpu_opregion_base_write_handler(vgpu,
+				*(u32 *)p_data);
		if (ret)
			return ret;
...
@@ -67,7 +67,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
	return 1;
}

-static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
...
@@ -179,4 +179,6 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);

+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe);
+
#endif
/*
* Copyright 2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Xiaoguang Chen
* Tina Zhang <tina.zhang@intel.com>
*/
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <linux/vfio.h>
#include "i915_drv.h"
#include "gvt.h"
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int i, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (WARN_ON(!fb_info))
return -ENODEV;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (unlikely(!st))
return -ENOMEM;
ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
if (ret) {
kfree(st);
return ret;
}
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, fb_info->size, i) {
sg->offset = 0;
sg->length = PAGE_SIZE;
sg_dma_address(sg) =
GEN8_DECODE_PTE(readq(&gtt_entries[i]));
sg_dma_len(sg) = PAGE_SIZE;
}
__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
return 0;
}
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
sg_free_table(pages);
kfree(pages);
}
static void dmabuf_gem_object_free(struct kref *kref)
{
struct intel_vgpu_dmabuf_obj *obj =
container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
struct intel_vgpu *vgpu = obj->vgpu;
struct list_head *pos;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = container_of(pos,
struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
kfree(dmabuf_obj);
list_del(pos);
break;
}
}
} else {
/* Free the orphan dmabuf_objs here */
kfree(obj->info);
kfree(obj);
}
}
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
kref_get(&obj->kref);
}
static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
kref_put(&obj->kref, dmabuf_gem_object_free);
}
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
struct intel_vgpu *vgpu = obj->vgpu;
if (vgpu) {
mutex_lock(&vgpu->dmabuf_lock);
gem_obj->base.dma_buf = NULL;
dmabuf_obj_put(obj);
mutex_unlock(&vgpu->dmabuf_lock);
} else {
/* vgpu is NULL, as it has been removed already */
gem_obj->base.dma_buf = NULL;
dmabuf_obj_put(obj);
}
}
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
.flags = I915_GEM_OBJECT_IS_PROXY,
.get_pages = vgpu_gem_get_pages,
.put_pages = vgpu_gem_put_pages,
.release = vgpu_gem_release,
};
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct intel_vgpu_fb_info *info)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return NULL;
drm_gem_private_object_init(dev, &obj->base,
info->size << PAGE_SHIFT);
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->base.write_domain = 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
switch (info->drm_format_mod << 10) {
case PLANE_CTL_TILED_LINEAR:
tiling_mode = I915_TILING_NONE;
break;
case PLANE_CTL_TILED_X:
tiling_mode = I915_TILING_X;
stride = info->stride;
break;
case PLANE_CTL_TILED_Y:
tiling_mode = I915_TILING_Y;
stride = info->stride;
break;
default:
gvt_dbg_core("not supported tiling mode\n");
}
obj->tiling_and_stride = tiling_mode | stride;
} else {
obj->tiling_and_stride = info->drm_format_mod ?
I915_TILING_X : 0;
}
return obj;
}
static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *info,
int plane_id)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
int ret;
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
if (ret)
return ret;
info->start = p.base;
info->start_gpa = p.base_gpa;
info->width = p.width;
info->height = p.height;
info->stride = p.stride;
info->drm_format = p.drm_format;
info->drm_format_mod = p.tiled;
info->size = (((p.stride * p.height * p.bpp) / 8) +
(PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
if (ret)
return ret;
info->start = c.base;
info->start_gpa = c.base_gpa;
info->width = c.width;
info->height = c.height;
info->stride = c.width * (c.bpp / 8);
info->drm_format = c.drm_format;
info->drm_format_mod = 0;
info->x_pos = c.x_pos;
info->y_pos = c.y_pos;
/* An invalid cursor hotspot value is delivered to the host
 * until we find a way to get the cursor hotspot info from
 * the guest OS.
 */
info->x_hot = UINT_MAX;
info->y_hot = UINT_MAX;
info->size = (((info->stride * c.height * c.bpp) / 8)
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
gvt_vgpu_err("invalid plane id:%d\n", plane_id);
return -EINVAL;
}
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
}
if (info->start & (PAGE_SIZE - 1)) {
gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
return -EFAULT;
}
if (((info->start >> PAGE_SHIFT) + info->size) >
ggtt_total_entries(&dev_priv->ggtt)) {
gvt_vgpu_err("Invalid GTT offset or size\n");
return -EFAULT;
}
if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
gvt_vgpu_err("invalid gma addr\n");
return -EFAULT;
}
return 0;
}
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *latest_info)
{
struct list_head *pos;
struct intel_vgpu_fb_info *fb_info;
struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
struct intel_vgpu_dmabuf_obj *ret = NULL;
list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
list);
if ((dmabuf_obj == NULL) ||
(dmabuf_obj->info == NULL))
continue;
fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
if ((fb_info->start == latest_info->start) &&
(fb_info->start_gpa == latest_info->start_gpa) &&
(fb_info->size == latest_info->size) &&
(fb_info->drm_format_mod == latest_info->drm_format_mod) &&
(fb_info->drm_format == latest_info->drm_format) &&
(fb_info->width == latest_info->width) &&
(fb_info->height == latest_info->height)) {
ret = dmabuf_obj;
break;
}
}
return ret;
}
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
struct list_head *pos;
struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
struct intel_vgpu_dmabuf_obj *ret = NULL;
list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
list);
if (!dmabuf_obj)
continue;
if (dmabuf_obj->dmabuf_id == id) {
ret = dmabuf_obj;
break;
}
}
return ret;
}
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
struct intel_vgpu_fb_info *fb_info)
{
gvt_dmabuf->drm_format = fb_info->drm_format;
gvt_dmabuf->width = fb_info->width;
gvt_dmabuf->height = fb_info->height;
gvt_dmabuf->stride = fb_info->stride;
gvt_dmabuf->size = fb_info->size;
gvt_dmabuf->x_pos = fb_info->x_pos;
gvt_dmabuf->y_pos = fb_info->y_pos;
gvt_dmabuf->x_hot = fb_info->x_hot;
gvt_dmabuf->y_hot = fb_info->y_hot;
}
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
struct vfio_device_gfx_plane_info *gfx_plane_info = args;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct intel_vgpu_fb_info fb_info;
int ret = 0;
if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
VFIO_GFX_PLANE_TYPE_PROBE))
return ret;
else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
(!gfx_plane_info->flags))
return -EINVAL;
ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
gfx_plane_info->drm_plane_type);
if (ret != 0)
goto out;
mutex_lock(&vgpu->dmabuf_lock);
/* If exists, pick up the exposed dmabuf_obj */
dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
if (dmabuf_obj) {
update_fb_info(gfx_plane_info, &fb_info);
gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;
/* This buffer may be released between query_plane ioctl and
* get_dmabuf ioctl. Add the refcount to make sure it won't
* be released between the two ioctls.
*/
if (!dmabuf_obj->initref) {
dmabuf_obj->initref = true;
dmabuf_obj_get(dmabuf_obj);
}
ret = 0;
gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
vgpu->id, kref_read(&dmabuf_obj->kref),
gfx_plane_info->dmabuf_id);
mutex_unlock(&vgpu->dmabuf_lock);
goto out;
}
mutex_unlock(&vgpu->dmabuf_lock);
/* Need to allocate a new one */
dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
if (unlikely(!dmabuf_obj)) {
gvt_vgpu_err("alloc dmabuf_obj failed\n");
ret = -ENOMEM;
goto out;
}
dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
GFP_KERNEL);
if (unlikely(!dmabuf_obj->info)) {
gvt_vgpu_err("allocate intel vgpu fb info failed\n");
ret = -ENOMEM;
goto out_free_dmabuf;
}
memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));
((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;
dmabuf_obj->vgpu = vgpu;
ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
if (ret < 0)
goto out_free_info;
gfx_plane_info->dmabuf_id = ret;
dmabuf_obj->dmabuf_id = ret;
dmabuf_obj->initref = true;
kref_init(&dmabuf_obj->kref);
mutex_lock(&vgpu->dmabuf_lock);
if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
gvt_vgpu_err("get vfio device failed\n");
mutex_unlock(&vgpu->dmabuf_lock);
goto out_free_info;
}
mutex_unlock(&vgpu->dmabuf_lock);
update_fb_info(gfx_plane_info, &fb_info);
INIT_LIST_HEAD(&dmabuf_obj->list);
mutex_lock(&vgpu->dmabuf_lock);
list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
mutex_unlock(&vgpu->dmabuf_lock);
gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
__func__, kref_read(&dmabuf_obj->kref), ret);
return 0;
out_free_info:
kfree(dmabuf_obj->info);
out_free_dmabuf:
kfree(dmabuf_obj);
out:
/* ENODEV means plane isn't ready, which might be a normal case. */
return (ret == -ENODEV) ? 0 : ret;
}
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
int dmabuf_fd;
int ret = 0;
mutex_lock(&vgpu->dmabuf_lock);
dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
if (dmabuf_obj == NULL) {
gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
ret = -EINVAL;
goto out;
}
obj = vgpu_create_gem(dev, dmabuf_obj->info);
if (obj == NULL) {
gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
ret = -ENOMEM;
goto out;
}
obj->gvt_info = dmabuf_obj->info;
dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
if (IS_ERR(dmabuf)) {
gvt_vgpu_err("export dma-buf failed\n");
ret = PTR_ERR(dmabuf);
goto out_free_gem;
}
obj->base.dma_buf = dmabuf;
i915_gem_object_put(obj);
ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
if (ret < 0) {
gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
goto out_free_dmabuf;
}
dmabuf_fd = ret;
dmabuf_obj_get(dmabuf_obj);
if (dmabuf_obj->initref) {
dmabuf_obj->initref = false;
dmabuf_obj_put(dmabuf_obj);
}
mutex_unlock(&vgpu->dmabuf_lock);
gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
" file count: %ld, GEM ref: %d\n",
vgpu->id, dmabuf_obj->dmabuf_id,
kref_read(&dmabuf_obj->kref),
dmabuf_fd,
file_count(dmabuf->file),
kref_read(&obj->base.refcount));
return dmabuf_fd;
out_free_dmabuf:
dma_buf_put(dmabuf);
out_free_gem:
i915_gem_object_put(obj);
out:
mutex_unlock(&vgpu->dmabuf_lock);
return ret;
}
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
mutex_lock(&vgpu->dmabuf_lock);
list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
list);
if (dmabuf_obj->initref) {
dmabuf_obj->initref = false;
dmabuf_obj_put(dmabuf_obj);
}
idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
if (dmabuf_obj->vgpu)
intel_gvt_hypervisor_put_vfio_device(vgpu);
list_del(pos);
dmabuf_obj->vgpu = NULL;
}
mutex_unlock(&vgpu->dmabuf_lock);
}
/*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Xiaoguang Chen
* Tina Zhang <tina.zhang@intel.com>
*/
#ifndef _GVT_DMABUF_H_
#define _GVT_DMABUF_H_
#include <linux/vfio.h>
struct intel_vgpu_fb_info {
__u64 start;
__u64 start_gpa;
__u64 drm_format_mod;
__u32 drm_format; /* drm format of plane */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
__u32 size; /* size of plane in bytes, align on page */
__u32 x_pos; /* horizontal position of cursor plane */
__u32 y_pos; /* vertical position of cursor plane */
__u32 x_hot; /* horizontal position of cursor hotspot */
__u32 y_hot; /* vertical position of cursor hotspot */
struct intel_vgpu_dmabuf_obj *obj;
};
/**
* struct intel_vgpu_dmabuf_obj - Intel vGPU device buffer object
*/
struct intel_vgpu_dmabuf_obj {
struct intel_vgpu *vgpu;
struct intel_vgpu_fb_info *info;
__u32 dmabuf_id;
struct kref kref;
bool initref;
struct list_head list;
};
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
#endif
@@ -458,7 +458,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
			emulate_schedule_in);

-	queue_workload(workload);
+	intel_vgpu_queue_workload(workload);
	return 0;
}
@@ -528,7 +528,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

-void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
@@ -542,7 +542,7 @@ void clean_execlist(struct intel_vgpu *vgpu)
	}
}

-void reset_execlist(struct intel_vgpu *vgpu,
+static void reset_execlist(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -553,7 +553,7 @@ void reset_execlist(struct intel_vgpu *vgpu,
		init_vgpu_execlist(vgpu, engine->id);
}

-int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu)
{
	reset_execlist(vgpu, ALL_ENGINES);
	return 0;
...
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Bing Niu <bing.niu@intel.com>
* Xu Han <xu.han@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Xiaoguang Chen <xiaoguang.chen@intel.com>
* Yang Liu <yang2.liu@intel.com>
* Tina Zhang <tina.zhang@intel.com>
*
*/
#include <uapi/drm/drm_fourcc.h>
#include "i915_drv.h"
#include "gvt.h"
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
int drm_format; /* Pixel format in DRM definition */
int bpp; /* Bits per pixel, 0 indicates invalid */
char *desc; /* The description */
};
static struct pixel_format bdw_pixel_formats[] = {
{DRM_FORMAT_C8, 8, "8-bit Indexed"},
{DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
{DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
{DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
{DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
{DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
/* non-supported format has bpp default to 0 */
{0, 0, NULL},
};
static struct pixel_format skl_pixel_formats[] = {
{DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"},
{DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"},
{DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"},
{DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"},
{DRM_FORMAT_C8, 8, "8-bit Indexed"},
{DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
{DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"},
{DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
{DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"},
{DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
{DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
{DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
/* non-supported format has bpp default to 0 */
{0, 0, NULL},
};
static int bdw_format_to_drm(int format)
{
int bdw_pixel_formats_index = 6;
switch (format) {
case DISPPLANE_8BPP:
bdw_pixel_formats_index = 0;
break;
case DISPPLANE_BGRX565:
bdw_pixel_formats_index = 1;
break;
case DISPPLANE_BGRX888:
bdw_pixel_formats_index = 2;
break;
case DISPPLANE_RGBX101010:
bdw_pixel_formats_index = 3;
break;
case DISPPLANE_BGRX101010:
bdw_pixel_formats_index = 4;
break;
case DISPPLANE_RGBX888:
bdw_pixel_formats_index = 5;
break;
default:
break;
}
return bdw_pixel_formats_index;
}
static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
int yuv_order)
{
int skl_pixel_formats_index = 12;
switch (format) {
case PLANE_CTL_FORMAT_INDEXED:
skl_pixel_formats_index = 4;
break;
case PLANE_CTL_FORMAT_RGB_565:
skl_pixel_formats_index = 5;
break;
case PLANE_CTL_FORMAT_XRGB_8888:
if (rgb_order)
skl_pixel_formats_index = alpha ? 6 : 7;
else
skl_pixel_formats_index = alpha ? 8 : 9;
break;
case PLANE_CTL_FORMAT_XRGB_2101010:
skl_pixel_formats_index = rgb_order ? 10 : 11;
break;
case PLANE_CTL_FORMAT_YUV422:
skl_pixel_formats_index = yuv_order >> 16;
if (skl_pixel_formats_index > 3)
return -EINVAL;
break;
default:
break;
}
return skl_pixel_formats_index;
}
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
break;
case PLANE_CTL_TILED_X:
stride = stride_reg * 512;
break;
case PLANE_CTL_TILED_Y:
stride = stride_reg * 128;
break;
case PLANE_CTL_TILED_YF:
if (bpp == 8)
stride = stride_reg * 64;
else if (bpp == 16 || bpp == 32 || bpp == 64)
stride = stride_reg * 128;
else
gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
break;
default:
gvt_dbg_core("skl: unsupported tile format:%x\n",
tiled);
}
}
return stride;
}
static int get_active_pipe(struct intel_vgpu *vgpu)
{
int i;
for (i = 0; i < I915_MAX_PIPES; i++)
if (pipe_is_enabled(vgpu, i))
break;
return i;
}
/**
* intel_vgpu_decode_primary_plane - Decode primary plane
* @vgpu: input vgpu
* @plane: primary plane to save decoded info
* This function is called to decode the primary plane.
*
* Returns:
* 0 on success, non-zero if failed.
*/
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
u32 val, fmt;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
val = vgpu_vreg(vgpu, DSPCNTR(pipe));
plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
if (!plane->enabled)
return -ENODEV;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
_PLANE_CTL_TILED_SHIFT;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK,
val & PLANE_CTL_YUV422_ORDER_MASK);
plane->bpp = skl_pixel_formats[fmt].bpp;
plane->drm_format = skl_pixel_formats[fmt].drm_format;
} else {
plane->tiled = !!(val & DISPPLANE_TILED);
fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
plane->bpp = bdw_pixel_formats[fmt].bpp;
plane->drm_format = bdw_pixel_formats[fmt].drm_format;
}
if (!plane->bpp) {
gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
return -EINVAL;
}
plane->hw_format = fmt;
plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
return -EINVAL;
}
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
return -EINVAL;
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
_PIPE_H_SRCSZ_SHIFT;
plane->width += 1;
plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) &
_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
plane->height += 1; /* raw height is one minus the real value */
val = vgpu_vreg(vgpu, DSPTILEOFF(pipe));
plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
_PRI_PLANE_X_OFF_SHIFT;
plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
_PRI_PLANE_Y_OFF_SHIFT;
return 0;
}
#define CURSOR_FORMAT_NUM (1 << 6)
struct cursor_mode_format {
int drm_format; /* Pixel format in DRM definition */
u8 bpp; /* Bits per pixel; 0 indicates invalid */
u32 width; /* In pixel */
u32 height; /* In lines */
char *desc; /* The description */
};
static struct cursor_mode_format cursor_pixel_formats[] = {
{DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"},
{DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"},
{DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
{DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
/* non-supported format has bpp default to 0 */
{0, 0, 0, 0, NULL},
};
static int cursor_mode_to_drm(int mode)
{
int cursor_pixel_formats_index = 4;
switch (mode) {
case CURSOR_MODE_128_ARGB_AX:
cursor_pixel_formats_index = 0;
break;
case CURSOR_MODE_256_ARGB_AX:
cursor_pixel_formats_index = 1;
break;
case CURSOR_MODE_64_ARGB_AX:
cursor_pixel_formats_index = 2;
break;
case CURSOR_MODE_64_32B_AX:
cursor_pixel_formats_index = 3;
break;
default:
break;
}
return cursor_pixel_formats_index;
}
/**
* intel_vgpu_decode_cursor_plane - Decode cursor plane
* @vgpu: input vgpu
* @plane: cursor plane to save decoded info
* This function is called to decode the cursor plane.
*
* Returns:
* 0 on success, non-zero if failed.
*/
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
u32 val, mode, index;
u32 alpha_plane, alpha_force;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
val = vgpu_vreg(vgpu, CURCNTR(pipe));
mode = val & CURSOR_MODE;
plane->enabled = (mode != CURSOR_MODE_DISABLE);
if (!plane->enabled)
return -ENODEV;
index = cursor_mode_to_drm(mode);
if (!cursor_pixel_formats[index].bpp) {
gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
return -EINVAL;
}
plane->mode = mode;
plane->bpp = cursor_pixel_formats[index].bpp;
plane->drm_format = cursor_pixel_formats[index].drm_format;
plane->width = cursor_pixel_formats[index].width;
plane->height = cursor_pixel_formats[index].height;
alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
_CURSOR_ALPHA_PLANE_SHIFT;
alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
_CURSOR_ALPHA_FORCE_SHIFT;
if (alpha_plane || alpha_force)
gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
alpha_plane, alpha_force);
plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
return -EINVAL;
}
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
return -EINVAL;
}
val = vgpu_vreg(vgpu, CURPOS(pipe));
plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
return 0;
}
#define SPRITE_FORMAT_NUM (1 << 3)
static struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = {
[0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
[0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
[0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
[0x4] = {DRM_FORMAT_AYUV, 32,
"YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
};
/**
* intel_vgpu_decode_sprite_plane - Decode sprite plane
* @vgpu: input vgpu
* @plane: sprite plane to save decoded info
* This function is called to decode the sprite plane.
*
* Returns:
* 0 on success, non-zero if failed.
*/
int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_sprite_plane_format *plane)
{
u32 val, fmt;
u32 color_order, yuv_order;
int drm_format;
int pipe;
pipe = get_active_pipe(vgpu);
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
val = vgpu_vreg(vgpu, SPRCTL(pipe));
plane->enabled = !!(val & SPRITE_ENABLE);
if (!plane->enabled)
return -ENODEV;
plane->tiled = !!(val & SPRITE_TILED);
color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
_SPRITE_YUV_ORDER_SHIFT;
fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
if (!sprite_pixel_formats[fmt].bpp) {
gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
return -EINVAL;
}
plane->hw_format = fmt;
plane->bpp = sprite_pixel_formats[fmt].bpp;
drm_format = sprite_pixel_formats[fmt].drm_format;
/* Order of RGB values in an RGBxxx buffer may be ordered RGB or
* BGR depending on the state of the color_order field
*/
if (!color_order) {
if (drm_format == DRM_FORMAT_XRGB2101010)
drm_format = DRM_FORMAT_XBGR2101010;
else if (drm_format == DRM_FORMAT_XRGB8888)
drm_format = DRM_FORMAT_XBGR8888;
}
if (drm_format == DRM_FORMAT_YUV422) {
switch (yuv_order) {
case 0:
drm_format = DRM_FORMAT_YUYV;
break;
case 1:
drm_format = DRM_FORMAT_UYVY;
break;
case 2:
drm_format = DRM_FORMAT_YVYU;
break;
case 3:
drm_format = DRM_FORMAT_VYUY;
break;
default:
/* yuv_order has only 2 bits */
break;
}
}
plane->drm_format = drm_format;
plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
return -EINVAL;
}
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
return -EINVAL;
}
plane->stride = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) &
_SPRITE_STRIDE_MASK;
val = vgpu_vreg(vgpu, SPRSIZE(pipe));
plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
_SPRITE_SIZE_HEIGHT_SHIFT;
plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
_SPRITE_SIZE_WIDTH_SHIFT;
plane->height += 1; /* raw height is one minus the real value */
plane->width += 1; /* raw width is one minus the real value */
val = vgpu_vreg(vgpu, SPRPOS(pipe));
plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
val = vgpu_vreg(vgpu, SPROFFSET(pipe));
plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
_SPRITE_OFFSET_START_X_SHIFT;
plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
_SPRITE_OFFSET_START_Y_SHIFT;
return 0;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Bing Niu <bing.niu@intel.com>
* Xu Han <xu.han@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Xiaoguang Chen <xiaoguang.chen@intel.com>
* Yang Liu <yang2.liu@intel.com>
* Tina Zhang <tina.zhang@intel.com>
*
*/
#ifndef _GVT_FB_DECODER_H_
#define _GVT_FB_DECODER_H_
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
#define _PIPE_V_SRCSZ_MASK (0xfff << _PIPE_V_SRCSZ_SHIFT)
#define _PIPE_H_SRCSZ_SHIFT 16
#define _PIPE_H_SRCSZ_MASK (0x1fff << _PIPE_H_SRCSZ_SHIFT)
#define _PRI_PLANE_FMT_SHIFT 26
#define _PRI_PLANE_STRIDE_MASK (0x3ff << 6)
#define _PRI_PLANE_X_OFF_SHIFT 0
#define _PRI_PLANE_X_OFF_MASK (0x1fff << _PRI_PLANE_X_OFF_SHIFT)
#define _PRI_PLANE_Y_OFF_SHIFT 16
#define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT)
#define _CURSOR_MODE 0x3f
#define _CURSOR_ALPHA_FORCE_SHIFT 8
#define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT)
#define _CURSOR_ALPHA_PLANE_SHIFT 10
#define _CURSOR_ALPHA_PLANE_MASK (0x3 << _CURSOR_ALPHA_PLANE_SHIFT)
#define _CURSOR_POS_X_SHIFT 0
#define _CURSOR_POS_X_MASK (0x1fff << _CURSOR_POS_X_SHIFT)
#define _CURSOR_SIGN_X_SHIFT 15
#define _CURSOR_SIGN_X_MASK (1 << _CURSOR_SIGN_X_SHIFT)
#define _CURSOR_POS_Y_SHIFT 16
#define _CURSOR_POS_Y_MASK (0xfff << _CURSOR_POS_Y_SHIFT)
#define _CURSOR_SIGN_Y_SHIFT 31
#define _CURSOR_SIGN_Y_MASK (1 << _CURSOR_SIGN_Y_SHIFT)
#define _SPRITE_FMT_SHIFT 25
#define _SPRITE_COLOR_ORDER_SHIFT 20
#define _SPRITE_YUV_ORDER_SHIFT 16
#define _SPRITE_STRIDE_SHIFT 6
#define _SPRITE_STRIDE_MASK (0x1ff << _SPRITE_STRIDE_SHIFT)
#define _SPRITE_SIZE_WIDTH_SHIFT 0
#define _SPRITE_SIZE_HEIGHT_SHIFT 16
#define _SPRITE_SIZE_WIDTH_MASK (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT)
#define _SPRITE_SIZE_HEIGHT_MASK (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT)
#define _SPRITE_POS_X_SHIFT 0
#define _SPRITE_POS_Y_SHIFT 16
#define _SPRITE_POS_X_MASK (0x1fff << _SPRITE_POS_X_SHIFT)
#define _SPRITE_POS_Y_MASK (0xfff << _SPRITE_POS_Y_SHIFT)
#define _SPRITE_OFFSET_START_X_SHIFT 0
#define _SPRITE_OFFSET_START_Y_SHIFT 16
#define _SPRITE_OFFSET_START_X_MASK (0x1fff << _SPRITE_OFFSET_START_X_SHIFT)
#define _SPRITE_OFFSET_START_Y_MASK (0xfff << _SPRITE_OFFSET_START_Y_SHIFT)
enum GVT_FB_EVENT {
FB_MODE_SET_START = 1,
FB_MODE_SET_END,
FB_DISPLAY_FLIP,
};
enum DDI_PORT {
DDI_PORT_NONE = 0,
DDI_PORT_B = 1,
DDI_PORT_C = 2,
DDI_PORT_D = 3,
DDI_PORT_E = 4
};
struct intel_gvt;
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
u8 tiled; /* X-tiled */
u8 bpp; /* bits per pixel */
u32 hw_format; /* format field in the PRI_CTL register */
u32 drm_format; /* format in DRM definition */
u32 base; /* framebuffer base in graphics memory */
u64 base_gpa;
u32 x_offset; /* in pixels */
u32 y_offset; /* in lines */
u32 width; /* in pixels */
u32 height; /* in lines */
u32 stride; /* in bytes */
};
struct intel_vgpu_sprite_plane_format {
u8 enabled; /* plane is enabled */
u8 tiled; /* X-tiled */
u8 bpp; /* bits per pixel */
u32 hw_format; /* format field in the SPR_CTL register */
u32 drm_format; /* format in DRM definition */
u32 base; /* sprite base in graphics memory */
u64 base_gpa;
u32 x_pos; /* in pixels */
u32 y_pos; /* in lines */
u32 x_offset; /* in pixels */
u32 y_offset; /* in lines */
u32 width; /* in pixels */
u32 height; /* in lines */
u32 stride; /* in bytes */
};
struct intel_vgpu_cursor_plane_format {
u8 enabled;
u8 mode; /* cursor mode select */
u8 bpp; /* bits per pixel */
u32 drm_format; /* format in DRM definition */
u32 base; /* cursor base in graphics memory */
u64 base_gpa;
u32 x_pos; /* in pixels */
u32 y_pos; /* in lines */
u8 x_sign; /* X Position Sign */
u8 y_sign; /* Y Position Sign */
u32 width; /* in pixels */
u32 height; /* in lines */
u32 x_hot; /* in pixels */
u32 y_hot; /* in pixels */
};
struct intel_vgpu_pipe_format {
struct intel_vgpu_primary_plane_format primary;
struct intel_vgpu_sprite_plane_format sprite;
struct intel_vgpu_cursor_plane_format cursor;
enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */
};
struct intel_vgpu_fb_format {
struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES];
};
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane);
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane);
int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_sprite_plane_format *plane);
#endif
@@ -181,6 +181,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
	.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
	.get_gvt_attrs = intel_get_gvt_attrs,
+	.vgpu_query_plane = intel_vgpu_query_plane,
+	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
};

/**
...
@@ -46,6 +46,8 @@
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"
+#include "fb_decoder.h"
+#include "dmabuf.h"

#define GVT_MAX_VGPU 8
@@ -123,7 +125,9 @@ struct intel_vgpu_irq {
};

struct intel_vgpu_opregion {
+	bool mapped;
	void *va;
+	void *va_gopregion;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};
@@ -206,8 +210,16 @@ struct intel_vgpu {
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
+		struct vfio_device *vfio_device;
	} vdev;
#endif
+	struct list_head dmabuf_obj_list_head;
+	struct mutex dmabuf_lock;
+	struct idr object_idr;
+	struct completion vblank_done;
};

/* validating GM healthy status */
@@ -505,7 +517,8 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);

void populate_pvinfo_page(struct intel_vgpu *vgpu);
@@ -532,6 +545,8 @@ struct intel_gvt_ops {
			const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			struct attribute_group ***intel_vgpu_type_groups);
+	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
+	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
};
...
@@ -55,6 +55,9 @@ struct intel_gvt_mpt {
			unsigned long mfn, unsigned int nr, bool map);
	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
			bool map);
+	int (*set_opregion)(void *vgpu);
+	int (*get_vfio_device)(void *vgpu);
+	void (*put_vfio_device)(void *vgpu);
};

extern struct intel_gvt_mpt xengt_mpt;
...
@@ -53,11 +53,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
+struct intel_vgpu_regops {
+	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+			size_t count, loff_t *ppos, bool iswrite);
+	void (*release)(struct intel_vgpu *vgpu,
+			struct vfio_region *region);
+};
+
struct vfio_region {
	u32 type;
	u32 subtype;
	size_t size;
	u32 flags;
+	const struct intel_vgpu_regops *ops;
+	void *data;
};

struct kvmgt_pgfn {
@@ -316,6 +328,108 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
	}
}
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
void *base = vgpu->vdev.region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos >= vgpu->vdev.region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
}
static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
struct vfio_region *region)
{
}
static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
.rw = intel_vgpu_reg_rw_opregion,
.release = intel_vgpu_reg_release_opregion,
};
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
unsigned int type, unsigned int subtype,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
struct vfio_region *region;
region = krealloc(vgpu->vdev.region,
(vgpu->vdev.num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
vgpu->vdev.region = region;
vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
vgpu->vdev.num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
vgpu->vdev.vfio_device = vfio_device_get_from_dev(
mdev_dev(vgpu->vdev.mdev));
if (!vgpu->vdev.vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
return 0;
}
static int kvmgt_set_opregion(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
void *base;
int ret;
/* Each vgpu has its own opregion, although VFIO would create another
 * one later. This one is used to expose the opregion to VFIO, while
 * the one VFIO creates later is the one the guest actually uses.
 */
base = vgpu_opregion(vgpu)->va;
if (!base)
return -ENOMEM;
if (memcmp(base, OPREGION_SIGNATURE, 16)) {
memunmap(base);
return -EINVAL;
}
ret = intel_vgpu_register_reg(vgpu,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
&intel_vgpu_regops_opregion, OPREGION_SIZE,
VFIO_REGION_INFO_FLAG_READ, base);
return ret;
}
static void kvmgt_put_vfio_device(void *vgpu)
{
if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
return;
vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
@@ -546,7 +660,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
	int ret = -EINVAL;

-	if (index >= VFIO_PCI_NUM_REGIONS) {
+	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}
@@ -574,8 +688,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
+		break;
	default:
-		gvt_vgpu_err("unsupported region: %u\n", index);
+		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+			return -EINVAL;
+
+		index -= VFIO_PCI_NUM_REGIONS;
+		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+				ppos, is_write);
	}

	return ret == 0 ? count : ret;
@@ -838,7 +958,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
-		info.num_regions = VFIO_PCI_NUM_REGIONS;
+		info.num_regions = VFIO_PCI_NUM_REGIONS +
+				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -959,6 +1080,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
		}

		if (caps.size) {
+			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
@@ -1045,6 +1167,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf;
int ret = 0;
minsz = offsetofend(struct vfio_device_gfx_plane_info,
dmabuf_id);
if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
return -EFAULT;
if (dmabuf.argsz < minsz)
return -EINVAL;
ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0)
return ret;
return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id;
__s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
return dmabuf_fd;
	}

	return 0;
@@ -1286,6 +1435,9 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);
+	mutex_init(&vgpu->dmabuf_lock);
+	init_completion(&vgpu->vblank_done);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1426,6 +1578,9 @@ struct intel_gvt_mpt kvmgt_mpt = {
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
+	.set_opregion = kvmgt_set_opregion,
+	.get_vfio_device = kvmgt_get_vfio_device,
+	.put_vfio_device = kvmgt_put_vfio_device,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
...
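
The kvmgt hunks above also expose the emulated OpRegion as a read-only, vendor-specific VFIO device region (VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION), advertised through the region-info capability chain and backed by intel_vgpu_reg_rw_opregion(). The sketch below is a hypothetical userspace reader, not part of this series; it again assumes device_fd is an open VFIO device fd for the mdev, SAMPLE_PCI_VENDOR_ID_INTEL is a local stand-in for PCI_VENDOR_ID_INTEL (0x8086), and error handling is kept minimal.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

#define SAMPLE_PCI_VENDOR_ID_INTEL 0x8086	/* PCI_VENDOR_ID_INTEL */

static ssize_t read_igd_opregion(int device_fd, void *buf, size_t len)
{
	struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
	unsigned int i;

	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &dev_info) < 0)
		return -1;

	/* Device-specific regions follow the fixed PCI regions. */
	for (i = VFIO_PCI_NUM_REGIONS; i < dev_info.num_regions; i++) {
		struct vfio_region_info *info, *tmp;
		struct vfio_info_cap_header *hdr;
		ssize_t got = -1;

		info = calloc(1, sizeof(*info));
		if (!info)
			return -1;
		info->argsz = sizeof(*info);
		info->index = i;

		/* First call reports the argsz needed for the cap chain. */
		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info) < 0 ||
		    !(info->flags & VFIO_REGION_INFO_FLAG_CAPS) ||
		    info->argsz <= sizeof(*info)) {
			free(info);
			continue;
		}
		tmp = realloc(info, info->argsz);
		if (!tmp) {
			free(info);
			return -1;
		}
		info = tmp;
		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info) < 0 ||
		    info->cap_offset < sizeof(*info)) {
			free(info);
			continue;
		}

		/* Walk the capability chain for the type cap kvmgt registers. */
		for (hdr = (void *)((char *)info + info->cap_offset);
		     hdr != (void *)info;
		     hdr = (void *)((char *)info + hdr->next)) {
			struct vfio_region_info_cap_type *cap = (void *)hdr;

			if (hdr->id == VFIO_REGION_INFO_CAP_TYPE &&
			    cap->type == (VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
					  SAMPLE_PCI_VENDOR_ID_INTEL) &&
			    cap->subtype ==
					VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION) {
				if (len > info->size)
					len = info->size;
				/* Served by intel_vgpu_reg_rw_opregion(). */
				got = pread(device_fd, buf, len, info->offset);
				break;
			}
			if (!hdr->next)
				break;
		}
		free(info);
		if (got >= 0)
			return got;
	}
	return -1;
}
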
@@ -294,4 +294,49 @@ static inline int intel_gvt_hypervisor_set_trap_area(
	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
/**
* intel_gvt_hypervisor_set_opregion - Set opregion for guest
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->set_opregion)
return 0;
return intel_gvt_host.mpt->set_opregion(vgpu);
}
/**
* intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->get_vfio_device)
return 0;
return intel_gvt_host.mpt->get_vfio_device(vgpu);
}
/**
* intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
* @vgpu: a vGPU
*/
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->put_vfio_device)
return;
intel_gvt_host.mpt->put_vfio_device(vgpu);
}
#endif /* _GVT_MPT_H_ */
...@@ -213,11 +213,20 @@ static void virt_vbt_generation(struct vbt *v) ...@@ -213,11 +213,20 @@ static void virt_vbt_generation(struct vbt *v)
v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS; v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
} }
static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu) /**
* intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
* @vgpu: a vGPU
* @gpa: guest physical address of opregion
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
{ {
u8 *buf; u8 *buf;
struct opregion_header *header; struct opregion_header *header;
struct vbt v; struct vbt v;
const char opregion_signature[16] = OPREGION_SIGNATURE;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
...@@ -231,8 +240,8 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu) ...@@ -231,8 +240,8 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */ /* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va; buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf; header = (struct opregion_header *)buf;
memcpy(header->signature, OPREGION_SIGNATURE, memcpy(header->signature, opregion_signature,
sizeof(OPREGION_SIGNATURE)); sizeof(opregion_signature));
header->size = 0x8; header->size = 0x8;
header->opregion_ver = 0x02000000; header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT; header->mboxes = MBOX_VBT;
...@@ -250,25 +259,6 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu) ...@@ -250,25 +259,6 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
return 0; return 0;
} }
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
int i, ret;
if (WARN((vgpu_opregion(vgpu)->va),
"vgpu%d: opregion has been initialized already.\n",
vgpu->id))
return -EINVAL;
ret = alloc_and_init_virt_opregion(vgpu);
if (ret < 0)
return ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
return 0;
}
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{ {
u64 mfn; u64 mfn;
...@@ -290,59 +280,91 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) ...@@ -290,59 +280,91 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
return ret; return ret;
} }
} }
vgpu_opregion(vgpu)->mapped = map;
return 0; return 0;
} }
/** /**
* intel_vgpu_clean_opregion - clean the stuff used to emulate opregion * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
*
* @vgpu: a vGPU * @vgpu: a vGPU
* @gpa: guest physical address of opregion
* *
* Returns:
* Zero on success, negative error code if failed.
*/ */
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{ {
gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
if (!vgpu_opregion(vgpu)->va) int i, ret = 0;
return; unsigned long pfn;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { gvt_dbg_core("emulate opregion from kernel\n");
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va, switch (intel_gvt_host.hypervisor_type) {
get_order(INTEL_GVT_OPREGION_SIZE)); case INTEL_GVT_HYPERVISOR_KVM:
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
INTEL_GVT_OPREGION_SIZE,
MEMREMAP_WB);
if (!vgpu_opregion(vgpu)->va_gopregion) {
gvt_vgpu_err("failed to map guest opregion\n");
ret = -EFAULT;
}
vgpu_opregion(vgpu)->mapped = true;
break;
case INTEL_GVT_HYPERVISOR_XEN:
/**
* A Windows guest on XenGT writes this register twice: once from Xen
* hvmloader and once from the Windows graphics driver.
*/
if (vgpu_opregion(vgpu)->mapped)
map_vgpu_opregion(vgpu, false);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
vgpu_opregion(vgpu)->va = NULL; ret = map_vgpu_opregion(vgpu, true);
break;
default:
ret = -EINVAL;
gvt_vgpu_err("not supported hypervisor\n");
} }
return ret;
} }
/** /**
* intel_vgpu_init_opregion - initialize the stuff used to emulate opregion * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
* @vgpu: a vGPU * @vgpu: a vGPU
* @gpa: guest physical address of opregion
* *
* Returns:
* Zero on success, negative error code if failed.
*/ */
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{ {
int ret; gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id); if (!vgpu_opregion(vgpu)->va)
return;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
gvt_dbg_core("emulate opregion from kernel\n"); if (vgpu_opregion(vgpu)->mapped)
map_vgpu_opregion(vgpu, false);
ret = init_vgpu_opregion(vgpu, gpa); } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
if (ret) if (vgpu_opregion(vgpu)->mapped) {
return ret; memunmap(vgpu_opregion(vgpu)->va_gopregion);
vgpu_opregion(vgpu)->va_gopregion = NULL;
ret = map_vgpu_opregion(vgpu, true); }
if (ret)
return ret;
} }
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
get_order(INTEL_GVT_OPREGION_SIZE));
vgpu_opregion(vgpu)->va = NULL;
return 0;
} }
#define GVT_OPREGION_FUNC(scic) \ #define GVT_OPREGION_FUNC(scic) \
({ \ ({ \
u32 __ret; \ u32 __ret; \
...@@ -461,8 +483,21 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) ...@@ -461,8 +483,21 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
u32 *scic, *parm; u32 *scic, *parm;
u32 func, subfunc; u32 func, subfunc;
scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC; switch (intel_gvt_host.hypervisor_type) {
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; case INTEL_GVT_HYPERVISOR_XEN:
scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
break;
case INTEL_GVT_HYPERVISOR_KVM:
scic = vgpu_opregion(vgpu)->va_gopregion +
INTEL_GVT_OPREGION_SCIC;
parm = vgpu_opregion(vgpu)->va_gopregion +
INTEL_GVT_OPREGION_PARM;
break;
default:
gvt_vgpu_err("not supported hypervisor\n");
return -EINVAL;
}
if (!(swsci & SWSCI_SCI_SELECT)) { if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_vgpu_err("requesting SMI service\n"); gvt_vgpu_err("requesting SMI service\n");
......
...@@ -372,6 +372,11 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) ...@@ -372,6 +372,11 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
} }
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{ {
struct intel_gvt_workload_scheduler *scheduler = struct intel_gvt_workload_scheduler *scheduler =
......
...@@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu); ...@@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu); void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
void intel_gvt_kick_schedule(struct intel_gvt *gvt);
#endif #endif
...@@ -188,10 +188,12 @@ static int shadow_context_status_change(struct notifier_block *nb, ...@@ -188,10 +188,12 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1); atomic_set(&workload->shadow_ctx_active, 1);
break; break;
case INTEL_CONTEXT_SCHEDULE_OUT: case INTEL_CONTEXT_SCHEDULE_OUT:
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
save_ring_hw_state(workload->vgpu, ring_id); save_ring_hw_state(workload->vgpu, ring_id);
atomic_set(&workload->shadow_ctx_active, 0); atomic_set(&workload->shadow_ctx_active, 0);
break; break;
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
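/* A preempted workload is still in flight and will run again, so only
 * save the ring HW state here; shadow_ctx_active is left untouched,
 * unlike the SCHEDULE_OUT case above. */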
save_ring_hw_state(workload->vgpu, ring_id);
break;
default: default:
WARN_ON(1); WARN_ON(1);
return NOTIFY_OK; return NOTIFY_OK;
...@@ -245,7 +247,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) ...@@ -245,7 +247,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
return 0; return 0;
} }
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{ {
if (!wa_ctx->indirect_ctx.obj) if (!wa_ctx->indirect_ctx.obj)
return; return;
...@@ -1036,6 +1038,9 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ...@@ -1036,6 +1038,9 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
if (IS_ERR(s->shadow_ctx)) if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx); return PTR_ERR(s->shadow_ctx);
if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
s->shadow_ctx->priority = INT_MAX;
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create("gvt-g_vgpu_workload", s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
...@@ -1328,3 +1333,15 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1328,3 +1333,15 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
return workload; return workload;
} }
/**
* intel_vgpu_queue_workload - Queue a vGPU workload
* @workload: the workload to queue
*/
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
list_add_tail(&workload->list,
workload_q_head(workload->vgpu, workload->ring_id));
intel_gvt_kick_schedule(workload->vgpu->gvt);
wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}
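A hedged sketch of a call site: the exported helper replaces the queue_workload() macro removed from scheduler.h below and additionally kicks the scheduler. submit_prepared_workload() is a hypothetical wrapper, not a function from this series; it assumes 'workload' has already been created and fully shadowed.

/* Hypothetical call site, not part of this patch. */
static void submit_prepared_workload(struct intel_vgpu_workload *workload)
{
	/* Adds the workload to its ring's queue, requests an event-based
	 * reschedule via intel_gvt_kick_schedule(), and wakes the ring's
	 * scheduler wait queue. */
	intel_vgpu_queue_workload(workload);
}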
...@@ -125,12 +125,7 @@ struct intel_vgpu_shadow_bb { ...@@ -125,12 +125,7 @@ struct intel_vgpu_shadow_bb {
#define workload_q_head(vgpu, ring_id) \ #define workload_q_head(vgpu, ring_id) \
(&(vgpu->submission.workload_q_head[ring_id])) (&(vgpu->submission.workload_q_head[ring_id]))
#define queue_workload(workload) do { \ void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
list_add_tail(&workload->list, \
workload_q_head(workload->vgpu, workload->ring_id)); \
wake_up(&workload->vgpu->gvt-> \
scheduler.waitq[workload->ring_id]); \
} while (0)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt); int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
......
...@@ -236,6 +236,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) ...@@ -236,6 +236,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
} }
intel_vgpu_stop_schedule(vgpu); intel_vgpu_stop_schedule(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
} }
...@@ -265,6 +266,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) ...@@ -265,6 +266,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_gvt_hypervisor_detach_vgpu(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu); intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu); intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
vfree(vgpu); vfree(vgpu);
intel_gvt_update_vgpu_types(gvt); intel_gvt_update_vgpu_types(gvt);
...@@ -349,7 +351,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, ...@@ -349,7 +351,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle; vgpu->handle = param->handle;
vgpu->gvt = gvt; vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight; vgpu->sched_ctl.weight = param->weight;
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary); intel_vgpu_init_cfg_space(vgpu, param->primary);
ret = intel_vgpu_init_mmio(vgpu); ret = intel_vgpu_init_mmio(vgpu);
...@@ -370,10 +373,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, ...@@ -370,10 +373,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret) if (ret)
goto out_detach_hypervisor_vgpu; goto out_detach_hypervisor_vgpu;
ret = intel_vgpu_init_display(vgpu, param->resolution); ret = intel_vgpu_init_opregion(vgpu);
if (ret) if (ret)
goto out_clean_gtt; goto out_clean_gtt;
ret = intel_vgpu_init_display(vgpu, param->resolution);
if (ret)
goto out_clean_opregion;
ret = intel_vgpu_setup_submission(vgpu); ret = intel_vgpu_setup_submission(vgpu);
if (ret) if (ret)
goto out_clean_display; goto out_clean_display;
...@@ -386,6 +393,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, ...@@ -386,6 +393,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret) if (ret)
goto out_clean_sched_policy; goto out_clean_sched_policy;
ret = intel_gvt_hypervisor_set_opregion(vgpu);
if (ret)
goto out_clean_sched_policy;
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return vgpu; return vgpu;
...@@ -396,6 +407,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, ...@@ -396,6 +407,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
intel_vgpu_clean_submission(vgpu); intel_vgpu_clean_submission(vgpu);
out_clean_display: out_clean_display:
intel_vgpu_clean_display(vgpu); intel_vgpu_clean_display(vgpu);
out_clean_opregion:
intel_vgpu_clean_opregion(vgpu);
out_clean_gtt: out_clean_gtt:
intel_vgpu_clean_gtt(vgpu); intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu: out_detach_hypervisor_vgpu:
......
...@@ -261,6 +261,8 @@ struct drm_i915_gem_object { ...@@ -261,6 +261,8 @@ struct drm_i915_gem_object {
} userptr; } userptr;
unsigned long scratch; unsigned long scratch;
void *gvt_info;
}; };
/** for phys allocated objects */ /** for phys allocated objects */
......
...@@ -502,6 +502,68 @@ struct vfio_pci_hot_reset { ...@@ -502,6 +502,68 @@ struct vfio_pci_hot_reset {
#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13) #define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
/**
* VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_device_gfx_plane_info)
*
* Set the drm_plane_type and flags, then retrieve the gfx plane info.
*
* flags supported:
* - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
* to ask whether the mdev supports dma-buf: the ioctl returns 0 if it
* does, -EINVAL if it does not.
* - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
* to ask whether the mdev supports a display region: the ioctl returns
* 0 if it does, -EINVAL if it does not.
* - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
* with each call to query the plane info.
* - Others are invalid and return -EINVAL.
*
* Note:
* 1. The plane may be disabled by the guest. In that case the ioctl still
* succeeds, with the drm_format, size, width and height fields
* zero-initialized.
* 2. x_hot/y_hot are set to 0xFFFFFFFF if no hotspot information is
* available.
*
* Return: 0 on success, -errno on other failure.
*/
struct vfio_device_gfx_plane_info {
__u32 argsz;
__u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
/* in */
__u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
/* out */
__u32 drm_format; /* drm format of plane */
__u64 drm_format_mod; /* tiled mode */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
__u32 size; /* size of plane in bytes, page aligned */
__u32 x_pos; /* horizontal position of cursor plane */
__u32 y_pos; /* vertical position of cursor plane */
__u32 x_hot; /* horizontal position of cursor hotspot */
__u32 y_hot; /* vertical position of cursor hotspot */
union {
__u32 region_index; /* region index */
__u32 dmabuf_id; /* dma-buf id */
};
};
#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
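A minimal userspace sketch (not part of this patch) of how a client of the new ioctl might probe for dma-buf plane support and then query the primary plane. It assumes an already-opened VFIO device fd and a <linux/vfio.h> that carries this addition; the value 1 for drm_plane_type corresponds to DRM_PLANE_TYPE_PRIMARY in the kernel's enum drm_plane_type.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_primary_plane(int device_fd,
			       struct vfio_device_gfx_plane_info *info)
{
	memset(info, 0, sizeof(*info));
	info->argsz = sizeof(*info);

	/* Probe: does this mdev expose planes as dma-bufs at all? */
	info->flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF;
	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, info))
		return -errno;	/* -EINVAL: dma-buf planes not supported */

	/* Query the current primary plane. */
	info->flags = VFIO_GFX_PLANE_TYPE_DMABUF;
	info->drm_plane_type = 1;	/* DRM_PLANE_TYPE_PRIMARY */
	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, info))
		return -errno;

	/* Zeroed drm_format/width/height means the plane is disabled. */
	return 0;
}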
/**
* VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
*
* Return a new dma-buf file descriptor for an exposed guest framebuffer
* described by the provided dmabuf_id. The dmabuf_id is returned by
* VFIO_DEVICE_QUERY_GFX_PLANE as a token for the exposed guest framebuffer.
*/
#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
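Continuing the hypothetical sketch above, the dmabuf_id reported in struct vfio_device_gfx_plane_info can then be exchanged for a dma-buf file descriptor:

static int get_plane_dmabuf(int device_fd, __u32 dmabuf_id)
{
	/* On success the ioctl returns a new dma-buf fd for the exposed
	 * guest framebuffer; on failure it returns -1 with errno set. */
	int fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &dmabuf_id);

	return fd < 0 ? -errno : fd;
}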
/* -------- API for Type1 VFIO IOMMU -------- */ /* -------- API for Type1 VFIO IOMMU -------- */
/** /**
......