Commit 62980cac authored by Christoph Hellwig, committed by Zhi Wang

drm/i915/gvt: merge struct kvmgt_vdev into struct intel_vgpu

Move towards having only a single structure for the per-VGPU state.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-11-hch@lst.de
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
parent 3cbac24c
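The mechanical effect of the merge is that the kvmgt_vdev() lookup disappears and the VFIO/KVM fields formerly held in the separately allocated struct kvmgt_vdev are accessed directly on the vGPU. As an illustrative before/after taken from the patch below (shown here only for orientation, not as additional code):

    /* before: per-VGPU VFIO state lived behind a kvmgt_vdev() indirection */
    kvmgt_vdev(vgpu)->msi_trigger = trigger;

    /* after: the same field is embedded directly in struct intel_vgpu */
    vgpu->msi_trigger = trigger;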
@@ -207,21 +207,36 @@ struct intel_vgpu {
     struct dentry *debugfs;
 
-    /* Hypervisor-specific device state. */
-    void *vdev;
-
     struct list_head dmabuf_obj_list_head;
     struct mutex dmabuf_lock;
     struct idr object_idr;
     struct intel_vgpu_vblank_timer vblank_timer;
 
     u32 scan_nonprivbb;
-};
 
-static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
-{
-    return vgpu->vdev;
-}
+    struct mdev_device *mdev;
+    struct vfio_region *region;
+    int num_regions;
+    struct eventfd_ctx *intx_trigger;
+    struct eventfd_ctx *msi_trigger;
+
+    /*
+     * Two caches are used to avoid mapping duplicated pages (eg.
+     * scratch pages). This help to reduce dma setup overhead.
+     */
+    struct rb_root gfn_cache;
+    struct rb_root dma_addr_cache;
+    unsigned long nr_cache_entries;
+    struct mutex cache_lock;
+
+    struct notifier_block iommu_notifier;
+    struct notifier_block group_notifier;
+    struct kvm *kvm;
+    struct work_struct release_work;
+    atomic_t released;
+    struct vfio_device *vfio_device;
+    struct vfio_group *vfio_group;
+};
 
 /* validating GM healthy status*/
 #define vgpu_is_vm_unhealthy(ret_val) \
...
@@ -44,7 +44,6 @@ struct device;
 struct intel_gvt_mpt {
     int (*host_init)(struct device *dev, void *gvt);
     void (*host_exit)(struct device *dev, void *gvt);
-    int (*attach_vgpu)(void *vgpu, unsigned long *handle);
     void (*detach_vgpu)(void *vgpu);
     int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
     int (*enable_page_track)(unsigned long handle, u64 gfn);
...
@@ -112,37 +112,6 @@ struct gvt_dma {
     struct kref ref;
 };
 
-struct kvmgt_vdev {
-    struct intel_vgpu *vgpu;
-    struct mdev_device *mdev;
-    struct vfio_region *region;
-    int num_regions;
-    struct eventfd_ctx *intx_trigger;
-    struct eventfd_ctx *msi_trigger;
-
-    /*
-     * Two caches are used to avoid mapping duplicated pages (eg.
-     * scratch pages). This help to reduce dma setup overhead.
-     */
-    struct rb_root gfn_cache;
-    struct rb_root dma_addr_cache;
-    unsigned long nr_cache_entries;
-    struct mutex cache_lock;
-
-    struct notifier_block iommu_notifier;
-    struct notifier_block group_notifier;
-    struct kvm *kvm;
-    struct work_struct release_work;
-    atomic_t released;
-    struct vfio_device *vfio_device;
-    struct vfio_group *vfio_group;
-};
-
-static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
-{
-    return intel_vgpu_vdev(vgpu);
-}
-
 static inline bool handle_valid(unsigned long handle)
 {
     return !!(handle & ~0xff);
@@ -269,7 +238,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
         unsigned long size)
 {
     struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     int total_pages;
     int npage;
     int ret;
@@ -279,7 +247,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
     for (npage = 0; npage < total_pages; npage++) {
         unsigned long cur_gfn = gfn + npage;
 
-        ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
+        ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1);
         drm_WARN_ON(&i915->drm, ret != 1);
     }
 }
@@ -288,7 +256,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
         unsigned long size, struct page **page)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     unsigned long base_pfn = 0;
     int total_pages;
     int npage;
@@ -303,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
         unsigned long cur_gfn = gfn + npage;
         unsigned long pfn;
 
-        ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+        ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1,
                        IOMMU_READ | IOMMU_WRITE, &pfn);
         if (ret != 1) {
             gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -370,7 +337,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
         dma_addr_t dma_addr)
 {
-    struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
+    struct rb_node *node = vgpu->dma_addr_cache.rb_node;
     struct gvt_dma *itr;
 
     while (node) {
@@ -388,7 +355,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
 
 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-    struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
+    struct rb_node *node = vgpu->gfn_cache.rb_node;
     struct gvt_dma *itr;
 
     while (node) {
@@ -409,7 +376,6 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 {
     struct gvt_dma *new, *itr;
     struct rb_node **link, *parent = NULL;
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
     new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
     if (!new)
@@ -422,7 +388,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
     kref_init(&new->ref);
 
     /* gfn_cache maps gfn to struct gvt_dma. */
-    link = &vdev->gfn_cache.rb_node;
+    link = &vgpu->gfn_cache.rb_node;
     while (*link) {
         parent = *link;
         itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -433,11 +399,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
             link = &parent->rb_right;
     }
     rb_link_node(&new->gfn_node, parent, link);
-    rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
+    rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
 
     /* dma_addr_cache maps dma addr to struct gvt_dma. */
     parent = NULL;
-    link = &vdev->dma_addr_cache.rb_node;
+    link = &vgpu->dma_addr_cache.rb_node;
     while (*link) {
         parent = *link;
         itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -448,51 +414,46 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
             link = &parent->rb_right;
     }
     rb_link_node(&new->dma_addr_node, parent, link);
-    rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
-    vdev->nr_cache_entries++;
+    rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);
+    vgpu->nr_cache_entries++;
     return 0;
 }
 
 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
                 struct gvt_dma *entry)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
-    rb_erase(&entry->gfn_node, &vdev->gfn_cache);
-    rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
+    rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
+    rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
     kfree(entry);
-    vdev->nr_cache_entries--;
+    vgpu->nr_cache_entries--;
 }
 
 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 {
     struct gvt_dma *dma;
     struct rb_node *node = NULL;
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
     for (;;) {
-        mutex_lock(&vdev->cache_lock);
-        node = rb_first(&vdev->gfn_cache);
+        mutex_lock(&vgpu->cache_lock);
+        node = rb_first(&vgpu->gfn_cache);
         if (!node) {
-            mutex_unlock(&vdev->cache_lock);
+            mutex_unlock(&vgpu->cache_lock);
             break;
         }
         dma = rb_entry(node, struct gvt_dma, gfn_node);
         gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
         __gvt_cache_remove_entry(vgpu, dma);
-        mutex_unlock(&vdev->cache_lock);
+        mutex_unlock(&vgpu->cache_lock);
     }
 }
 
 static void gvt_cache_init(struct intel_vgpu *vgpu)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
-    vdev->gfn_cache = RB_ROOT;
-    vdev->dma_addr_cache = RB_ROOT;
-    vdev->nr_cache_entries = 0;
-    mutex_init(&vdev->cache_lock);
+    vgpu->gfn_cache = RB_ROOT;
+    vgpu->dma_addr_cache = RB_ROOT;
+    vgpu->nr_cache_entries = 0;
+    mutex_init(&vgpu->cache_lock);
 }
 
 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -566,18 +527,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
         size_t count, loff_t *ppos, bool iswrite)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
             VFIO_PCI_NUM_REGIONS;
-    void *base = vdev->region[i].data;
+    void *base = vgpu->region[i].data;
     loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
 
-    if (pos >= vdev->region[i].size || iswrite) {
+    if (pos >= vgpu->region[i].size || iswrite) {
         gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
         return -EINVAL;
     }
-    count = min(count, (size_t)(vdev->region[i].size - pos));
+    count = min(count, (size_t)(vgpu->region[i].size - pos));
     memcpy(buf, base + pos, count);
 
     return count;
@@ -670,8 +630,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
     int ret;
     unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
             VFIO_PCI_NUM_REGIONS;
-    struct vfio_edid_region *region =
-        (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
+    struct vfio_edid_region *region = vgpu->region[i].data;
     loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
 
     if (pos < region->vfio_edid_regs.edid_offset) {
@@ -703,34 +662,32 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
         const struct intel_vgpu_regops *ops,
         size_t size, u32 flags, void *data)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     struct vfio_region *region;
 
-    region = krealloc(vdev->region,
-            (vdev->num_regions + 1) * sizeof(*region),
+    region = krealloc(vgpu->region,
+            (vgpu->num_regions + 1) * sizeof(*region),
             GFP_KERNEL);
     if (!region)
         return -ENOMEM;
 
-    vdev->region = region;
-    vdev->region[vdev->num_regions].type = type;
-    vdev->region[vdev->num_regions].subtype = subtype;
-    vdev->region[vdev->num_regions].ops = ops;
-    vdev->region[vdev->num_regions].size = size;
-    vdev->region[vdev->num_regions].flags = flags;
-    vdev->region[vdev->num_regions].data = data;
-    vdev->num_regions++;
+    vgpu->region = region;
+    vgpu->region[vgpu->num_regions].type = type;
+    vgpu->region[vgpu->num_regions].subtype = subtype;
+    vgpu->region[vgpu->num_regions].ops = ops;
+    vgpu->region[vgpu->num_regions].size = size;
+    vgpu->region[vgpu->num_regions].flags = flags;
+    vgpu->region[vgpu->num_regions].data = data;
+    vgpu->num_regions++;
     return 0;
 }
 
 static int kvmgt_get_vfio_device(void *p_vgpu)
 {
     struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
-    vdev->vfio_device = vfio_device_get_from_dev(
-        mdev_dev(vdev->mdev));
-    if (!vdev->vfio_device) {
+    vgpu->vfio_device = vfio_device_get_from_dev(
+            mdev_dev(vgpu->mdev));
+    if (!vgpu->vfio_device) {
         gvt_vgpu_err("failed to get vfio device\n");
         return -ENODEV;
     }
@@ -796,14 +753,14 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
     return ret;
 }
 
-static void kvmgt_put_vfio_device(void *vgpu)
+static void kvmgt_put_vfio_device(void *data)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
+    struct intel_vgpu *vgpu = data;
 
-    if (WARN_ON(!vdev->vfio_device))
+    if (WARN_ON(!vgpu->vfio_device))
         return;
 
-    vfio_device_put(vdev->vfio_device);
+    vfio_device_put(vgpu->vfio_device);
 }
 
 static int intel_vgpu_create(struct mdev_device *mdev)
@@ -830,9 +787,9 @@ static int intel_vgpu_create(struct mdev_device *mdev)
         goto out;
     }
 
-    INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
+    INIT_WORK(&vgpu->release_work, intel_vgpu_release_work);
 
-    kvmgt_vdev(vgpu)->mdev = mdev;
+    vgpu->mdev = mdev;
     mdev_set_drvdata(mdev, vgpu);
 
     gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
@@ -857,10 +814,8 @@ static int intel_vgpu_remove(struct mdev_device *mdev)
 static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
                      unsigned long action, void *data)
 {
-    struct kvmgt_vdev *vdev = container_of(nb,
-                           struct kvmgt_vdev,
-                           iommu_notifier);
-    struct intel_vgpu *vgpu = vdev->vgpu;
+    struct intel_vgpu *vgpu =
+        container_of(nb, struct intel_vgpu, iommu_notifier);
 
     if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
         struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -870,7 +825,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
         iov_pfn = unmap->iova >> PAGE_SHIFT;
         end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
 
-        mutex_lock(&vdev->cache_lock);
+        mutex_lock(&vgpu->cache_lock);
         for (; iov_pfn < end_iov_pfn; iov_pfn++) {
             entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
             if (!entry)
@@ -880,7 +835,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
                        entry->size);
             __gvt_cache_remove_entry(vgpu, entry);
         }
-        mutex_unlock(&vdev->cache_lock);
+        mutex_unlock(&vgpu->cache_lock);
     }
 
     return NOTIFY_OK;
@@ -889,16 +844,15 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 static int intel_vgpu_group_notifier(struct notifier_block *nb,
                      unsigned long action, void *data)
 {
-    struct kvmgt_vdev *vdev = container_of(nb,
-                           struct kvmgt_vdev,
-                           group_notifier);
+    struct intel_vgpu *vgpu =
+        container_of(nb, struct intel_vgpu, group_notifier);
 
     /* the only action we care about */
     if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
-        vdev->kvm = data;
+        vgpu->kvm = data;
 
         if (!data)
-            schedule_work(&vdev->release_work);
+            schedule_work(&vgpu->release_work);
     }
 
     return NOTIFY_OK;
@@ -907,17 +861,16 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
 static int intel_vgpu_open_device(struct mdev_device *mdev)
 {
     struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     unsigned long events;
     int ret;
     struct vfio_group *vfio_group;
 
-    vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-    vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
+    vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+    vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier;
 
     events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
     ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
-                &vdev->iommu_notifier);
+                &vgpu->iommu_notifier);
     if (ret != 0) {
         gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
             ret);
@@ -926,7 +879,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev)
 
     events = VFIO_GROUP_NOTIFY_SET_KVM;
     ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
-                &vdev->group_notifier);
+                &vgpu->group_notifier);
     if (ret != 0) {
         gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
             ret);
@@ -939,7 +892,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev)
         gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
         goto undo_register;
     }
-    vdev->vfio_group = vfio_group;
+    vgpu->vfio_group = vfio_group;
 
     /* Take a module reference as mdev core doesn't take
      * a reference for vendor driver.
@@ -955,39 +908,37 @@ static int intel_vgpu_open_device(struct mdev_device *mdev)
 
     intel_gvt_activate_vgpu(vgpu);
 
-    atomic_set(&vdev->released, 0);
+    atomic_set(&vgpu->released, 0);
     return ret;
 
 undo_group:
-    vfio_group_put_external_user(vdev->vfio_group);
-    vdev->vfio_group = NULL;
+    vfio_group_put_external_user(vgpu->vfio_group);
+    vgpu->vfio_group = NULL;
 
 undo_register:
     vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
-                    &vdev->group_notifier);
+                    &vgpu->group_notifier);
 
 undo_iommu:
     vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
-                    &vdev->iommu_notifier);
+                    &vgpu->iommu_notifier);
 out:
     return ret;
 }
 
 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     struct eventfd_ctx *trigger;
 
-    trigger = vdev->msi_trigger;
+    trigger = vgpu->msi_trigger;
     if (trigger) {
         eventfd_ctx_put(trigger);
-        vdev->msi_trigger = NULL;
+        vgpu->msi_trigger = NULL;
     }
 }
 
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
     struct kvmgt_guest_info *info;
     int ret;
@@ -995,18 +946,18 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
     if (!handle_valid(vgpu->handle))
         return;
 
-    if (atomic_cmpxchg(&vdev->released, 0, 1))
+    if (atomic_cmpxchg(&vgpu->released, 0, 1))
         return;
 
     intel_gvt_release_vgpu(vgpu);
 
-    ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
-                    &vdev->iommu_notifier);
+    ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_IOMMU_NOTIFY,
+                    &vgpu->iommu_notifier);
     drm_WARN(&i915->drm, ret,
          "vfio_unregister_notifier for iommu failed: %d\n", ret);
 
-    ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
-                    &vdev->group_notifier);
+    ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_GROUP_NOTIFY,
+                    &vgpu->group_notifier);
     drm_WARN(&i915->drm, ret,
          "vfio_unregister_notifier for group failed: %d\n", ret);
 
@@ -1017,9 +968,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
     kvmgt_guest_exit(info);
 
     intel_vgpu_release_msi_eventfd_ctx(vgpu);
-    vfio_group_put_external_user(vdev->vfio_group);
+    vfio_group_put_external_user(vgpu->vfio_group);
 
-    vdev->kvm = NULL;
+    vgpu->kvm = NULL;
     vgpu->handle = 0;
 }
 
@@ -1032,10 +983,10 @@ static void intel_vgpu_close_device(struct mdev_device *mdev)
 
 static void intel_vgpu_release_work(struct work_struct *work)
 {
-    struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
-                           release_work);
+    struct intel_vgpu *vgpu =
+        container_of(work, struct intel_vgpu, release_work);
 
-    __intel_vgpu_release(vdev->vgpu);
+    __intel_vgpu_release(vgpu);
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1117,13 +1068,12 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
         size_t count, loff_t *ppos, bool is_write)
 {
     struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
     u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
     int ret = -EINVAL;
 
-    if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
+    if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
         gvt_vgpu_err("invalid index: %u\n", index);
         return -EINVAL;
     }
@@ -1152,11 +1102,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
     case VFIO_PCI_ROM_REGION_INDEX:
         break;
     default:
-        if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+        if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
             return -EINVAL;
 
         index -= VFIO_PCI_NUM_REGIONS;
-        return vdev->region[index].ops->rw(vgpu, buf, count,
+        return vgpu->region[index].ops->rw(vgpu, buf, count,
                 ppos, is_write);
     }
@@ -1409,7 +1359,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
             gvt_vgpu_err("eventfd_ctx_fdget failed\n");
             return PTR_ERR(trigger);
         }
-        kvmgt_vdev(vgpu)->msi_trigger = trigger;
+        vgpu->msi_trigger = trigger;
     } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
         intel_vgpu_release_msi_eventfd_ctx(vgpu);
@@ -1461,7 +1411,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                  unsigned long arg)
 {
     struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
     unsigned long minsz;
 
     gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1480,7 +1429,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
         info.flags = VFIO_DEVICE_FLAGS_PCI;
         info.flags |= VFIO_DEVICE_FLAGS_RESET;
         info.num_regions = VFIO_PCI_NUM_REGIONS +
-                vdev->num_regions;
+                vgpu->num_regions;
         info.num_irqs = VFIO_PCI_NUM_IRQS;
 
         return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1571,22 +1520,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                     .header.version = 1 };
 
             if (info.index >= VFIO_PCI_NUM_REGIONS +
-                    vdev->num_regions)
+                    vgpu->num_regions)
                 return -EINVAL;
             info.index =
                 array_index_nospec(info.index,
                         VFIO_PCI_NUM_REGIONS +
-                        vdev->num_regions);
+                        vgpu->num_regions);
 
             i = info.index - VFIO_PCI_NUM_REGIONS;
 
             info.offset =
                 VFIO_PCI_INDEX_TO_OFFSET(info.index);
-            info.size = vdev->region[i].size;
-            info.flags = vdev->region[i].flags;
+            info.size = vgpu->region[i].size;
+            info.flags = vgpu->region[i].flags;
 
-            cap_type.type = vdev->region[i].type;
-            cap_type.subtype = vdev->region[i].subtype;
+            cap_type.type = vgpu->region[i].type;
+            cap_type.subtype = vgpu->region[i].subtype;
 
             ret = vfio_info_add_capability(&caps,
                         &cap_type.header,
@@ -1928,15 +1877,13 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 {
     struct kvmgt_guest_info *info;
     struct intel_vgpu *vgpu;
-    struct kvmgt_vdev *vdev;
     struct kvm *kvm;
 
     vgpu = mdev_get_drvdata(mdev);
     if (handle_valid(vgpu->handle))
         return -EEXIST;
 
-    vdev = kvmgt_vdev(vgpu);
-    kvm = vdev->kvm;
+    kvm = vgpu->kvm;
     if (!kvm || kvm->mm != current->mm) {
         gvt_vgpu_err("KVM is required to use Intel vGPU\n");
         return -ESRCH;
@@ -1962,7 +1909,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
     kvm_page_track_register_notifier(kvm, &info->track_node);
 
     debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
-                 &vdev->nr_cache_entries);
+                 &vgpu->nr_cache_entries);
     return 0;
 }
 
@@ -1980,52 +1927,33 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
     return true;
 }
 
-static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
-{
-    struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
-
-    vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
-    if (!vgpu->vdev)
-        return -ENOMEM;
-
-    kvmgt_vdev(vgpu)->vgpu = vgpu;
-
-    return 0;
-}
-
 static void kvmgt_detach_vgpu(void *p_vgpu)
 {
     int i;
     struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
-    struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
-    if (!vdev->region)
+    if (!vgpu->region)
         return;
 
-    for (i = 0; i < vdev->num_regions; i++)
-        if (vdev->region[i].ops->release)
-            vdev->region[i].ops->release(vgpu,
-                    &vdev->region[i]);
-    vdev->num_regions = 0;
-    kfree(vdev->region);
-    vdev->region = NULL;
-
-    kfree(vdev);
+    for (i = 0; i < vgpu->num_regions; i++)
+        if (vgpu->region[i].ops->release)
+            vgpu->region[i].ops->release(vgpu,
+                    &vgpu->region[i]);
+    vgpu->num_regions = 0;
+    kfree(vgpu->region);
+    vgpu->region = NULL;
 }
 
 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 {
     struct kvmgt_guest_info *info;
     struct intel_vgpu *vgpu;
-    struct kvmgt_vdev *vdev;
 
     if (!handle_valid(handle))
         return -ESRCH;
 
     info = (struct kvmgt_guest_info *)handle;
     vgpu = info->vgpu;
-    vdev = kvmgt_vdev(vgpu);
 
     /*
      * When guest is poweroff, msi_trigger is set to NULL, but vgpu's
@@ -2036,10 +1964,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
      * enabled by guest. so if msi_trigger is null, success is still
      * returned and don't inject interrupt into guest.
      */
-    if (vdev->msi_trigger == NULL)
+    if (vgpu->msi_trigger == NULL)
         return 0;
 
-    if (eventfd_signal(vdev->msi_trigger, 1) == 1)
+    if (eventfd_signal(vgpu->msi_trigger, 1) == 1)
         return 0;
 
     return -EFAULT;
@@ -2066,7 +1994,6 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
         unsigned long size, dma_addr_t *dma_addr)
 {
     struct intel_vgpu *vgpu;
-    struct kvmgt_vdev *vdev;
     struct gvt_dma *entry;
     int ret;
 
@@ -2074,9 +2001,8 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
         return -EINVAL;
 
     vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
-    vdev = kvmgt_vdev(vgpu);
 
-    mutex_lock(&vdev->cache_lock);
+    mutex_lock(&vgpu->cache_lock);
 
     entry = __gvt_cache_find_gfn(vgpu, gfn);
     if (!entry) {
@@ -2104,20 +2030,19 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
         *dma_addr = entry->dma_addr;
     }
 
-    mutex_unlock(&vdev->cache_lock);
+    mutex_unlock(&vgpu->cache_lock);
     return 0;
 
 err_unmap:
     gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
 err_unlock:
-    mutex_unlock(&vdev->cache_lock);
+    mutex_unlock(&vgpu->cache_lock);
     return ret;
 }
 
 static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
 {
     struct kvmgt_guest_info *info;
-    struct kvmgt_vdev *vdev;
     struct gvt_dma *entry;
     int ret = 0;
 
@@ -2125,15 +2050,14 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
         return -ENODEV;
 
     info = (struct kvmgt_guest_info *)handle;
-    vdev = kvmgt_vdev(info->vgpu);
 
-    mutex_lock(&vdev->cache_lock);
+    mutex_lock(&info->vgpu->cache_lock);
     entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
     if (entry)
         kref_get(&entry->ref);
     else
         ret = -ENOMEM;
-    mutex_unlock(&vdev->cache_lock);
+    mutex_unlock(&info->vgpu->cache_lock);
 
     return ret;
 }
@@ -2150,20 +2074,18 @@ static void __gvt_dma_release(struct kref *ref)
 static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
 {
     struct intel_vgpu *vgpu;
-    struct kvmgt_vdev *vdev;
     struct gvt_dma *entry;
 
     if (!handle_valid(handle))
         return;
 
     vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
-    vdev = kvmgt_vdev(vgpu);
 
-    mutex_lock(&vdev->cache_lock);
+    mutex_lock(&vgpu->cache_lock);
     entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
     if (entry)
         kref_put(&entry->ref, __gvt_dma_release);
-    mutex_unlock(&vdev->cache_lock);
+    mutex_unlock(&vgpu->cache_lock);
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -2176,8 +2098,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 
     info = (struct kvmgt_guest_info *)handle;
 
-    return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
-            gpa, buf, len, write);
+    return vfio_dma_rw(info->vgpu->vfio_group, gpa, buf, len, write);
 }
 
 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
@@ -2215,7 +2136,6 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 static const struct intel_gvt_mpt kvmgt_mpt = {
     .host_init = kvmgt_host_init,
     .host_exit = kvmgt_host_exit,
-    .attach_vgpu = kvmgt_attach_vgpu,
     .detach_vgpu = kvmgt_detach_vgpu,
     .inject_msi = kvmgt_inject_msi,
     .enable_page_track = kvmgt_page_track_add,
...
@@ -71,22 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
     intel_gvt_host.mpt->host_exit(dev, gvt);
 }
 
-/**
- * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
- * related stuffs inside hypervisor.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
-{
-    /* optional to provide */
-    if (!intel_gvt_host.mpt->attach_vgpu)
-        return 0;
-
-    return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
-}
-
 /**
  * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
  * related stuffs inside hypervisor.
...
@@ -405,13 +405,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
     populate_pvinfo_page(vgpu);
 
-    ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
-    if (ret)
-        goto out_clean_vgpu_resource;
-
     ret = intel_vgpu_init_gtt(vgpu);
     if (ret)
-        goto out_detach_hypervisor_vgpu;
+        goto out_clean_vgpu_resource;
 
     ret = intel_vgpu_init_opregion(vgpu);
     if (ret)
@@ -454,8 +450,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
     intel_vgpu_clean_opregion(vgpu);
 out_clean_gtt:
     intel_vgpu_clean_gtt(vgpu);
-out_detach_hypervisor_vgpu:
-    intel_gvt_hypervisor_detach_vgpu(vgpu);
 out_clean_vgpu_resource:
     intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio:
...