Commit 675e5c4a authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Zhi Wang

drm/i915/gvt: remove intel_gvt_ops

Remove these pointless indirect calls by just calling the only instance
of each method directly.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-8-hch@lst.de
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
parent 8b750bf7
...@@ -39,23 +39,6 @@ ...@@ -39,23 +39,6 @@
#include <linux/vfio.h> #include <linux/vfio.h>
#include <linux/mdev.h> #include <linux/mdev.h>
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
.vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
.emulate_hotplug = intel_vgpu_emulate_hotplug,
};
static void init_device_info(struct intel_gvt *gvt) static void init_device_info(struct intel_gvt *gvt)
{ {
struct intel_gvt_device_info *info = &gvt->device_info; struct intel_gvt_device_info *info = &gvt->device_info;
...@@ -252,8 +235,7 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) ...@@ -252,8 +235,7 @@ static int intel_gvt_init_device(struct drm_i915_private *i915)
intel_gvt_debugfs_init(gvt); intel_gvt_debugfs_init(gvt);
ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt, ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt);
&intel_gvt_ops);
if (ret) if (ret)
goto out_destroy_idle_vgpu; goto out_destroy_idle_vgpu;
......
...@@ -556,30 +556,6 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu); ...@@ -556,30 +556,6 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
unsigned int);
int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
void (*vgpu_destroy)(struct intel_vgpu *vgpu);
void (*vgpu_release)(struct intel_vgpu *vgpu);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
unsigned int);
void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};
enum { enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST, GVT_FAILSAFE_UNSUPPORTED_GUEST,
GVT_FAILSAFE_INSUFFICIENT_RESOURCE, GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
......
...@@ -42,7 +42,7 @@ struct device; ...@@ -42,7 +42,7 @@ struct device;
* both Xen and KVM by providing dedicated hypervisor-related MPT modules. * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/ */
struct intel_gvt_mpt { struct intel_gvt_mpt {
int (*host_init)(struct device *dev, void *gvt, const void *ops); int (*host_init)(struct device *dev, void *gvt);
void (*host_exit)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle); int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu); void (*detach_vgpu)(void *vgpu);
......
...@@ -55,8 +55,6 @@ ...@@ -55,8 +55,6 @@
MODULE_IMPORT_NS(DMA_BUF); MODULE_IMPORT_NS(DMA_BUF);
MODULE_IMPORT_NS(I915_GVT); MODULE_IMPORT_NS(I915_GVT);
static const struct intel_gvt_ops *intel_gvt_ops;
/* helper macros copied from vfio-pci */ /* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40 #define VFIO_PCI_OFFSET_SHIFT 40
#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
...@@ -621,9 +619,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu, ...@@ -621,9 +619,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu,
gvt_vgpu_err("invalid EDID blob\n"); gvt_vgpu_err("invalid EDID blob\n");
return -EINVAL; return -EINVAL;
} }
intel_gvt_ops->emulate_hotplug(vgpu, true); intel_vgpu_emulate_hotplug(vgpu, true);
} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
intel_gvt_ops->emulate_hotplug(vgpu, false); intel_vgpu_emulate_hotplug(vgpu, false);
else { else {
gvt_vgpu_err("invalid EDID link state %d\n", gvt_vgpu_err("invalid EDID link state %d\n",
regs->link_state); regs->link_state);
...@@ -825,7 +823,7 @@ static int intel_vgpu_create(struct mdev_device *mdev) ...@@ -825,7 +823,7 @@ static int intel_vgpu_create(struct mdev_device *mdev)
goto out; goto out;
} }
vgpu = intel_gvt_ops->vgpu_create(gvt, type); vgpu = intel_gvt_create_vgpu(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) { if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret); gvt_err("failed to create intel vgpu: %d\n", ret);
...@@ -852,7 +850,7 @@ static int intel_vgpu_remove(struct mdev_device *mdev) ...@@ -852,7 +850,7 @@ static int intel_vgpu_remove(struct mdev_device *mdev)
if (handle_valid(vgpu->handle)) if (handle_valid(vgpu->handle))
return -EBUSY; return -EBUSY;
intel_gvt_ops->vgpu_destroy(vgpu); intel_gvt_destroy_vgpu(vgpu);
return 0; return 0;
} }
...@@ -955,7 +953,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) ...@@ -955,7 +953,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev)
if (ret) if (ret)
goto undo_group; goto undo_group;
intel_gvt_ops->vgpu_activate(vgpu); intel_gvt_activate_vgpu(vgpu);
atomic_set(&vdev->released, 0); atomic_set(&vdev->released, 0);
return ret; return ret;
...@@ -1000,7 +998,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) ...@@ -1000,7 +998,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
if (atomic_cmpxchg(&vdev->released, 0, 1)) if (atomic_cmpxchg(&vdev->released, 0, 1))
return; return;
intel_gvt_ops->vgpu_release(vgpu); intel_gvt_release_vgpu(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
&vdev->iommu_notifier); &vdev->iommu_notifier);
...@@ -1074,10 +1072,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off, ...@@ -1074,10 +1072,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
int ret; int ret;
if (is_write) if (is_write)
ret = intel_gvt_ops->emulate_mmio_write(vgpu, ret = intel_vgpu_emulate_mmio_write(vgpu,
bar_start + off, buf, count); bar_start + off, buf, count);
else else
ret = intel_gvt_ops->emulate_mmio_read(vgpu, ret = intel_vgpu_emulate_mmio_read(vgpu,
bar_start + off, buf, count); bar_start + off, buf, count);
return ret; return ret;
} }
...@@ -1133,10 +1131,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, ...@@ -1133,10 +1131,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
switch (index) { switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX: case VFIO_PCI_CONFIG_REGION_INDEX:
if (is_write) if (is_write)
ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos, ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
buf, count); buf, count);
else else
ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos, ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
buf, count); buf, count);
break; break;
case VFIO_PCI_BAR0_REGION_INDEX: case VFIO_PCI_BAR0_REGION_INDEX:
...@@ -1704,7 +1702,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, ...@@ -1704,7 +1702,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return ret; return ret;
} else if (cmd == VFIO_DEVICE_RESET) { } else if (cmd == VFIO_DEVICE_RESET) {
intel_gvt_ops->vgpu_reset(vgpu); intel_gvt_reset_vgpu(vgpu);
return 0; return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) { } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf; struct vfio_device_gfx_plane_info dmabuf;
...@@ -1717,7 +1715,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, ...@@ -1717,7 +1715,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (dmabuf.argsz < minsz) if (dmabuf.argsz < minsz)
return -EINVAL; return -EINVAL;
ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf); ret = intel_vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0) if (ret != 0)
return ret; return ret;
...@@ -1725,14 +1723,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, ...@@ -1725,14 +1723,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
-EFAULT : 0; -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) { } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id; __u32 dmabuf_id;
__s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg)) if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT; return -EFAULT;
return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
return dmabuf_fd;
} }
return -ENOTTY; return -ENOTTY;
...@@ -1783,7 +1777,7 @@ static struct mdev_parent_ops intel_vgpu_mdev_ops = { ...@@ -1783,7 +1777,7 @@ static struct mdev_parent_ops intel_vgpu_mdev_ops = {
.ioctl = intel_vgpu_ioctl, .ioctl = intel_vgpu_ioctl,
}; };
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) static int kvmgt_host_init(struct device *dev, void *gvt)
{ {
int ret; int ret;
...@@ -1791,7 +1785,6 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) ...@@ -1791,7 +1785,6 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
if (ret) if (ret)
return ret; return ret;
intel_gvt_ops = ops;
intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups; intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups;
ret = mdev_register_device(dev, &intel_vgpu_mdev_ops); ret = mdev_register_device(dev, &intel_vgpu_mdev_ops);
...@@ -1883,7 +1876,7 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, ...@@ -1883,7 +1876,7 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct kvmgt_guest_info, track_node); struct kvmgt_guest_info, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
intel_gvt_ops->write_protect_handler(info->vgpu, gpa, intel_vgpu_page_track_handler(info->vgpu, gpa,
(void *)val, len); (void *)val, len);
} }
......
...@@ -51,13 +51,12 @@ ...@@ -51,13 +51,12 @@
* Returns: * Returns:
* Zero on success, negative error code if failed * Zero on success, negative error code if failed
*/ */
static inline int intel_gvt_hypervisor_host_init(struct device *dev, static inline int intel_gvt_hypervisor_host_init(struct device *dev, void *gvt)
void *gvt, const void *ops)
{ {
if (!intel_gvt_host.mpt->host_init) if (!intel_gvt_host.mpt->host_init)
return -ENODEV; return -ENODEV;
return intel_gvt_host.mpt->host_init(dev, gvt, ops); return intel_gvt_host.mpt->host_init(dev, gvt);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment