Commit 507ad757 authored by Jani Nikula

Merge tag 'gvt-fixes-2017-06-29' of https://github.com/01org/gvt-linux into drm-intel-next-fixes

gvt-fixes-2017-06-29

- two race fixes for VFIO locks from Chuanxiao
- virtual display fix for BDW from Xiong
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170629065424.kxopjbvntuakbyz2@zhen-hp.sh.intel.com
parents bdbbf7d6 5cd82b75
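Background for the VFIO race fixes: the VFIO/mdev diffs at the end of this merge stop calling vfio_unpin_pages() while the vGPU cache mutex is held and defer the unpin to a worker instead. The hazard being avoided is the classic AB-BA lock inversion, sketched below with pthreads; the lock names are placeholders for illustration, not the actual VFIO or GVT-g kernel locks.

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Path 1 takes A then B (think: cache mutex, then a lock inside the callee). */
static void path_one(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

/* Path 2 takes B then A; run concurrently with path 1, this can deadlock. */
static void path_two(void)
{
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_a);
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
}

Deferring the B-side work (the unpin) until after A is dropped removes one edge of the cycle, which is what the new unpin_list plus worker achieves.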
@@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_B << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
+		if (IS_BROADWELL(dev_priv)) {
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+				~PORT_CLK_SEL_MASK;
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+				PORT_CLK_SEL_LCPLL_810;
+		}
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_C << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
+		if (IS_BROADWELL(dev_priv)) {
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+				~PORT_CLK_SEL_MASK;
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+				PORT_CLK_SEL_LCPLL_810;
+		}
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
@@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_D << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
+		if (IS_BROADWELL(dev_priv)) {
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+				~PORT_CLK_SEL_MASK;
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+				PORT_CLK_SEL_LCPLL_810;
+		}
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
@@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
+
+	/* Clear host CRT status, so guest couldn't detect this host CRT. */
+	if (IS_BROADWELL(dev_priv))
+		vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
 }

 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
......
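For context on the hunks above: vgpu_vreg() is GVT-g's accessor into a plain in-memory copy of the vGPU's MMIO space, so each Broadwell branch is an ordinary read-modify-write that clears the DDI clock-select field and latches LCPLL 810 MHz. Below is a minimal standalone sketch of that operation; the register offset and bit layout are assumptions for illustration (the real definitions live in i915_reg.h and the GVT-g headers).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed bit layout of PORT_CLK_SEL, for illustration only. */
#define PORT_CLK_SEL_MASK	(7u << 29)
#define PORT_CLK_SEL_LCPLL_810	(2u << 29)

struct vgpu_mmio {
	uint8_t *vreg;	/* software image of the vGPU's register space */
};

/* Simplified stand-in for GVT-g's vgpu_vreg() accessor. */
#define vgpu_vreg(m, off) (*(uint32_t *)((m)->vreg + (off)))

int main(void)
{
	struct vgpu_mmio m = { .vreg = calloc(1, 0x100000) };
	uint32_t port_clk_sel_b = 0x46104;	/* assumed PORT_CLK_SEL(PORT_B) offset */

	if (!m.vreg)
		return 1;

	/* The read-modify-write the patch performs per DDI port: */
	vgpu_vreg(&m, port_clk_sel_b) &= ~PORT_CLK_SEL_MASK;
	vgpu_vreg(&m, port_clk_sel_b) |= PORT_CLK_SEL_LCPLL_810;

	printf("PORT_CLK_SEL(B) = 0x%08x\n",
	       (unsigned int)vgpu_vreg(&m, port_clk_sel_b));
	free(m.vreg);
	return 0;
}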
@@ -182,6 +182,9 @@ struct intel_vgpu {
 		struct kvm *kvm;
 		struct work_struct release_work;
 		atomic_t released;
+		struct work_struct unpin_work;
+		spinlock_t unpin_lock; /* To protect unpin_list */
+		struct list_head unpin_list;
 	} vdev;
 #endif
 };
......
@@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	*(u32 *)p_data = (1 << 17);
-	return 0;
-}
-
-static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
-{
-	*(u32 *)p_data = 3;
-	return 0;
-}
-
-static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
-{
-	*(u32 *)p_data = (0x2f << 16);
+	switch (offset) {
+	case 0xe651c:
+	case 0xe661c:
+	case 0xe671c:
+	case 0xe681c:
+		vgpu_vreg(vgpu, offset) = 1 << 17;
+		break;
+	case 0xe6c04:
+		vgpu_vreg(vgpu, offset) = 0x3;
+		break;
+	case 0xe6e1c:
+		vgpu_vreg(vgpu, offset) = 0x2f << 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	read_vreg(vgpu, offset, p_data, bytes);
 	return 0;
 }
@@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
 		dp_aux_ch_ctl_mmio_write);

-	MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);

 	MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
 	MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
@@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
 	MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
 	MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
-	MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
-	MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+	MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
+	MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);

 	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
 		PORTA_HOTPLUG_STATUS_MASK
......
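One note on the handler consolidation above: instead of writing a hardcoded u32 straight into p_data, the new dpy_reg_mmio_read() latches the emulated value into the virtual register image and then serves the read via read_vreg(), so reads narrower than four bytes stay consistent with the stored value, and unexpected offsets now fail with -EINVAL rather than silently returning a constant. Below is a simplified stand-in for what a read_vreg-style helper does (the real GVT-g helper operates on the vgpu itself; this signature is an assumption for illustration).

#include <stdint.h>
#include <string.h>

/* Serve a guest MMIO read out of the cached register image. */
static void read_vreg(const uint8_t *vreg, unsigned int offset,
		      void *p_data, unsigned int bytes)
{
	memcpy(p_data, vreg + offset, bytes);
}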
@@ -78,6 +78,7 @@ struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
 	unsigned long iova;
+	struct list_head list;
 };

 static inline bool handle_valid(unsigned long handle)
@@ -166,6 +167,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 	new->gfn = gfn;
 	new->iova = iova;
+	INIT_LIST_HEAD(&new->list);

 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -197,26 +199,52 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 	kfree(entry);
 }

-static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+static void intel_vgpu_unpin_work(struct work_struct *work)
 {
+	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
+					       vdev.unpin_work);
 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	struct gvt_dma *this;
-	unsigned long g1;
-	int rc;
+	unsigned long gfn;
+
+	for (;;) {
+		spin_lock(&vgpu->vdev.unpin_lock);
+		if (list_empty(&vgpu->vdev.unpin_list)) {
+			spin_unlock(&vgpu->vdev.unpin_lock);
+			break;
+		}
+		this = list_first_entry(&vgpu->vdev.unpin_list,
+					struct gvt_dma, list);
+		list_del(&this->list);
+		spin_unlock(&vgpu->vdev.unpin_lock);
+
+		gfn = this->gfn;
+		vfio_unpin_pages(dev, &gfn, 1);
+		kfree(this);
+	}
+}
+
+static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+	struct gvt_dma *this;

 	mutex_lock(&vgpu->vdev.cache_lock);
 	this = __gvt_cache_find(vgpu, gfn);
 	if (!this) {
 		mutex_unlock(&vgpu->vdev.cache_lock);
-		return;
+		return false;
 	}

-	g1 = gfn;
 	gvt_dma_unmap_iova(vgpu, this->iova);
-	rc = vfio_unpin_pages(dev, &g1, 1);
-	WARN_ON(rc != 1);
-	__gvt_cache_remove_entry(vgpu, this);
+	/* remove this from rb tree */
+	rb_erase(&this->node, &vgpu->vdev.cache);
 	mutex_unlock(&vgpu->vdev.cache_lock);
+
+	/* put this to the unpin_list */
+	spin_lock(&vgpu->vdev.unpin_lock);
+	list_move_tail(&this->list, &vgpu->vdev.unpin_list);
+	spin_unlock(&vgpu->vdev.unpin_lock);
+
+	return true;
 }

 static void gvt_cache_init(struct intel_vgpu *vgpu)
@@ -232,16 +260,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	unsigned long gfn;

-	mutex_lock(&vgpu->vdev.cache_lock);
-	while ((node = rb_first(&vgpu->vdev.cache))) {
+	for (;;) {
+		mutex_lock(&vgpu->vdev.cache_lock);
+		node = rb_first(&vgpu->vdev.cache);
+		if (!node) {
+			mutex_unlock(&vgpu->vdev.cache_lock);
+			break;
+		}
 		dma = rb_entry(node, struct gvt_dma, node);
 		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;
-		vfio_unpin_pages(dev, &gfn, 1);
 		__gvt_cache_remove_entry(vgpu, dma);
+		mutex_unlock(&vgpu->vdev.cache_lock);
+		vfio_unpin_pages(dev, &gfn, 1);
 	}
-	mutex_unlock(&vgpu->vdev.cache_lock);
 }

 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
@@ -453,6 +485,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	}

 	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+	INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
+	spin_lock_init(&vgpu->vdev.unpin_lock);
+	INIT_LIST_HEAD(&vgpu->vdev.unpin_list);

 	vgpu->vdev.mdev = mdev;
 	mdev_set_drvdata(mdev, vgpu);
@@ -482,6 +517,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 	struct intel_vgpu *vgpu = container_of(nb,
 					       struct intel_vgpu,
 					       vdev.iommu_notifier);
+	bool sched_unmap = false;

 	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 		struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -491,7 +527,10 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 		end_gfn = gfn + unmap->size / PAGE_SIZE;

 		while (gfn < end_gfn)
-			gvt_cache_remove(vgpu, gfn++);
+			sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
+
+		if (sched_unmap)
+			schedule_work(&vgpu->vdev.unpin_work);
 	}

 	return NOTIFY_OK;
......
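Taken together, the VFIO changes split removal into a non-blocking mark phase under the cache mutex (gvt_cache_mark_remove) and a deferred unpin phase in a worker (intel_vgpu_unpin_work). Below is a self-contained userspace analog of that drain pattern, using pthreads and a singly linked list as stand-ins for the kernel work queue and list_head; every name in it is illustrative, not a kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_entry {
	unsigned long gfn;
	struct dma_entry *next;
};

static pthread_mutex_t unpin_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dma_entry *unpin_list;	/* guarded by unpin_lock */

/* Notifier-side analog of gvt_cache_mark_remove(): queue, never block. */
static void mark_remove(unsigned long gfn)
{
	struct dma_entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->gfn = gfn;
	pthread_mutex_lock(&unpin_lock);
	e->next = unpin_list;
	unpin_list = e;
	pthread_mutex_unlock(&unpin_lock);
}

/*
 * Worker-side analog of intel_vgpu_unpin_work(): detach one entry at a
 * time and do the blocking work with no list lock held.
 */
static void *unpin_worker(void *unused)
{
	(void)unused;
	for (;;) {
		struct dma_entry *e;

		pthread_mutex_lock(&unpin_lock);
		e = unpin_list;
		if (!e) {
			pthread_mutex_unlock(&unpin_lock);
			break;
		}
		unpin_list = e->next;
		pthread_mutex_unlock(&unpin_lock);

		printf("unpin gfn %#lx\n", e->gfn);	/* vfio_unpin_pages() stand-in */
		free(e);
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	mark_remove(0x1000);
	mark_remove(0x1001);
	pthread_create(&worker, NULL, unpin_worker, NULL);
	pthread_join(worker, NULL);
	return 0;
}

The property carried over from the patch is that the blocking release, vfio_unpin_pages() in the real code, always runs with no cache lock held; the list lock only ever guards pointer manipulation.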