Commit 1b4fd5d3 authored by Rodrigo Vivi

Merge tag 'gvt-next-2019-02-01' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2019-02-01

- new VFIO EDID region support (Henry)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201061523.GE5588@zhen-hp.sh.intel.com
parents 7360c9f6 39c68e87
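
To see the new region from the consumer side, below is a rough, illustrative sketch of how a userspace VFIO client (QEMU, for instance) might push a fresh EDID through this interface. It is not part of the commit: update_edid, fd and region_base are made-up names, and locating the region beforehand (via VFIO_DEVICE_GET_REGION_INFO and its VFIO_REGION_TYPE_GFX / VFIO_REGION_SUBTYPE_GFX_EDID capability) is assumed to have happened already. The struct vfio_region_gfx_edid fields and link-state constants are the real UAPI this series implements against; note the strictly 4-byte register accesses, which is all the kvmgt handler below accepts.

/* Hypothetical userspace sketch -- not part of this commit. */
#include <linux/vfio.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

static int update_edid(int fd, off_t region_base,
		       const uint8_t *edid, uint32_t size)
{
	uint32_t blob_off, max_size, state;

	/* Register reads must be exactly 4 bytes (see handle_edid_regs). */
	if (pread(fd, &blob_off, 4, region_base +
		  offsetof(struct vfio_region_gfx_edid, edid_offset)) != 4)
		return -1;
	if (pread(fd, &max_size, 4, region_base +
		  offsetof(struct vfio_region_gfx_edid, edid_max_size)) != 4)
		return -1;
	if (size > max_size)
		return -1;

	/* Take the link down before touching the blob. */
	state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
	if (pwrite(fd, &state, 4, region_base +
		   offsetof(struct vfio_region_gfx_edid, link_state)) != 4)
		return -1;

	/* Publish the new size first: blob writes are bounded by edid_size. */
	if (pwrite(fd, &size, 4, region_base +
		   offsetof(struct vfio_region_gfx_edid, edid_size)) != 4)
		return -1;
	if (pwrite(fd, edid, size, region_base + blob_off) != (ssize_t)size)
		return -1;

	/* Link up validates the blob and fires the emulated hotplug. */
	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
	return pwrite(fd, &state, 4, region_base +
		      offsetof(struct vfio_region_gfx_edid, link_state)) == 4
		? 0 : -1;
}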
@@ -342,6 +342,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 	port->dpcd->data_valid = true;
 	port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
 	port->type = type;
+	port->id = resolution;
 
 	emulate_monitor_status_change(vgpu);
@@ -444,6 +445,36 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 	mutex_unlock(&gvt->lock);
 }
 
+/**
+ * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
+ * @vgpu: a vGPU
+ * @connected: link state
+ *
+ * This function is used to trigger a hotplug interrupt for the vGPU.
+ *
+ */
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	/* TODO: add more platform support */
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		if (connected) {
+			vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+				SFUSE_STRAP_DDID_DETECTED;
+			vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		} else {
+			vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+				~SFUSE_STRAP_DDID_DETECTED;
+			vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT;
+		}
+		vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+			PORTD_HOTPLUG_STATUS_MASK;
+		intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
+	}
+}
+
 /**
  * intel_vgpu_clean_display - clean vGPU virtual display emulation
  * @vgpu: a vGPU
...
@@ -146,18 +146,19 @@ enum intel_vgpu_port_type {
 	GVT_PORT_MAX
 };
 
+enum intel_vgpu_edid {
+	GVT_EDID_1024_768,
+	GVT_EDID_1920_1200,
+	GVT_EDID_NUM,
+};
+
 struct intel_vgpu_port {
 	/* per display EDID information */
 	struct intel_vgpu_edid_data *edid;
 	/* per display DPCD information */
 	struct intel_vgpu_dpcd_data *dpcd;
 	int type;
-};
-
-enum intel_vgpu_edid {
-	GVT_EDID_1024_768,
-	GVT_EDID_1920_1200,
-	GVT_EDID_NUM,
-};
+	enum intel_vgpu_edid id;
+};
 
 static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
@@ -172,6 +173,30 @@ static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
 	}
 }
 
+static inline unsigned int vgpu_edid_xres(enum intel_vgpu_edid id)
+{
+	switch (id) {
+	case GVT_EDID_1024_768:
+		return 1024;
+	case GVT_EDID_1920_1200:
+		return 1920;
+	default:
+		return 0;
+	}
+}
+
+static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
+{
+	switch (id) {
+	case GVT_EDID_1024_768:
+		return 768;
+	case GVT_EDID_1920_1200:
+		return 1200;
+	default:
+		return 0;
+	}
+}
+
 void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
...
@@ -185,6 +185,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.vgpu_query_plane = intel_vgpu_query_plane,
 	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
 	.write_protect_handler = intel_vgpu_page_track_handler,
+	.emulate_hotplug = intel_vgpu_emulate_hotplug,
 };
 
 static void init_device_info(struct intel_gvt *gvt)
...
@@ -536,6 +536,8 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes);
 
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
 static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 {
 	/* We are 64bit bar. */
@@ -577,6 +579,7 @@ struct intel_gvt_ops {
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
 	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
 				     unsigned int);
+	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
 };
...
@@ -67,6 +67,7 @@ struct intel_gvt_mpt {
 	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
 			     bool map);
 	int (*set_opregion)(void *vgpu);
+	int (*set_edid)(void *vgpu, int port_num);
 	int (*get_vfio_device)(void *vgpu);
 	void (*put_vfio_device)(void *vgpu);
 	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
...
@@ -57,6 +57,8 @@ static const struct intel_gvt_ops *intel_gvt_ops;
 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 #define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
 
+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 
 struct vfio_region;
@@ -76,6 +78,11 @@ struct vfio_region {
 	void *data;
 };
 
+struct vfio_edid_region {
+	struct vfio_region_gfx_edid vfio_edid_regs;
+	void *edid_blob;
+};
+
 struct kvmgt_pgfn {
 	gfn_t gfn;
 	struct hlist_node hnode;
@@ -427,6 +434,111 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
 	.release = intel_vgpu_reg_release_opregion,
 };
 
+static int handle_edid_regs(struct intel_vgpu *vgpu,
+			struct vfio_edid_region *region, char *buf,
+			size_t count, u16 offset, bool is_write)
+{
+	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+	unsigned int data;
+
+	if (offset + count > sizeof(*regs))
+		return -EINVAL;
+
+	if (count != 4)
+		return -EINVAL;
+
+	if (is_write) {
+		data = *((unsigned int *)buf);
+		switch (offset) {
+		case offsetof(struct vfio_region_gfx_edid, link_state):
+			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+				if (!drm_edid_block_valid(
+					(u8 *)region->edid_blob,
+					0,
+					true,
+					NULL)) {
+					gvt_vgpu_err("invalid EDID blob\n");
+					return -EINVAL;
+				}
+				intel_gvt_ops->emulate_hotplug(vgpu, true);
+			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
+				intel_gvt_ops->emulate_hotplug(vgpu, false);
+			else {
+				gvt_vgpu_err("invalid EDID link state %d\n",
+					data);
+				return -EINVAL;
+			}
+			regs->link_state = data;
+			break;
+		case offsetof(struct vfio_region_gfx_edid, edid_size):
+			if (data > regs->edid_max_size) {
+				gvt_vgpu_err("EDID size is bigger than %d!\n",
+					regs->edid_max_size);
+				return -EINVAL;
+			}
+			regs->edid_size = data;
+			break;
+		default:
+			/* read-only regs */
+			gvt_vgpu_err("write read-only EDID region at offset %d\n",
+				offset);
+			return -EPERM;
+		}
+	} else {
+		memcpy(buf, (char *)regs + offset, count);
+	}
+
+	return count;
+}
+
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+			size_t count, u16 offset, bool is_write)
+{
+	if (offset + count > region->vfio_edid_regs.edid_size)
+		return -EINVAL;
+
+	if (is_write)
+		memcpy(region->edid_blob + offset, buf, count);
+	else
+		memcpy(buf, region->edid_blob + offset, count);
+
+	return count;
+}
+
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+		size_t count, loff_t *ppos, bool iswrite)
+{
+	int ret;
+	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+			VFIO_PCI_NUM_REGIONS;
+	struct vfio_edid_region *region =
+		(struct vfio_edid_region *)vgpu->vdev.region[i].data;
+	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+	if (pos < region->vfio_edid_regs.edid_offset) {
+		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+	} else {
+		pos -= EDID_BLOB_OFFSET;
+		ret = handle_edid_blob(region, buf, count, pos, iswrite);
+	}
+
+	if (ret < 0)
+		gvt_vgpu_err("failed to access EDID region\n");
+
+	return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+					struct vfio_region *region)
+{
+	kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+	.rw = intel_vgpu_reg_rw_edid,
+	.release = intel_vgpu_reg_release_edid,
+};
+
 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
 		unsigned int type, unsigned int subtype,
 		const struct intel_vgpu_regops *ops,
@@ -493,6 +605,36 @@ static int kvmgt_set_opregion(void *p_vgpu)
 	return ret;
 }
 
+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+	struct vfio_edid_region *base;
+	int ret;
+
+	base = kzalloc(sizeof(*base), GFP_KERNEL);
+	if (!base)
+		return -ENOMEM;
+
+	/* TODO: Add multi-port and EDID extension block support */
+	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+	base->vfio_edid_regs.edid_size = EDID_SIZE;
+	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+	base->edid_blob = port->edid->edid_block;
+
+	ret = intel_vgpu_register_reg(vgpu,
+			VFIO_REGION_TYPE_GFX,
+			VFIO_REGION_SUBTYPE_GFX_EDID,
+			&intel_vgpu_regops_edid, EDID_SIZE,
+			VFIO_REGION_INFO_FLAG_READ |
+			VFIO_REGION_INFO_FLAG_WRITE |
+			VFIO_REGION_INFO_FLAG_CAPS, base);
+
+	return ret;
+}
+
 static void kvmgt_put_vfio_device(void *vgpu)
 {
 	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
@@ -1874,6 +2016,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
 	.dma_map_guest_page = kvmgt_dma_map_guest_page,
 	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
 	.set_opregion = kvmgt_set_opregion,
+	.set_edid = kvmgt_set_edid,
 	.get_vfio_device = kvmgt_get_vfio_device,
 	.put_vfio_device = kvmgt_put_vfio_device,
 	.is_valid_gfn = kvmgt_is_valid_gfn,
...
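
A practical note on the drm_edid_block_valid() gate in handle_edid_regs() above: a 128-byte EDID base block only passes validation if all of its bytes sum to 0 modulo 256, with byte 127 acting as the checksum. A client that assembles or edits a blob by hand therefore has to fix that byte up before raising the link. A minimal illustrative helper (not part of this commit; the function name is made up):

#include <stddef.h>
#include <stdint.h>

#define EDID_BLOCK_SIZE 128	/* size of an EDID 1.x base block */

/* Recompute byte 127 so the whole block sums to 0 (mod 256). */
static void edid_fix_checksum(uint8_t block[EDID_BLOCK_SIZE])
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < EDID_BLOCK_SIZE - 1; i++)
		sum += block[i];
	block[EDID_BLOCK_SIZE - 1] = (uint8_t)-sum;
}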
@@ -313,6 +313,23 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
 	return intel_gvt_host.mpt->set_opregion(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_set_edid - Set EDID region for guest
+ * @vgpu: a vGPU
+ * @port_num: display port number
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
+						int port_num)
+{
+	if (!intel_gvt_host.mpt->set_edid)
+		return 0;
+
+	return intel_gvt_host.mpt->set_edid(vgpu, port_num);
+}
+
 /**
  * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
  * @vgpu: a vGPU
...
@@ -428,6 +428,12 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_sched_policy;
 
+	/* TODO: add more platform support */
+	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+	if (ret)
+		goto out_clean_sched_policy;
+
 	return vgpu;
 
 out_clean_sched_policy:
...