Commit 0c71cc04 authored by Linus Torvalds

Merge tag 'vfio-v5.11-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Fix uninitialized list walk in error path (Eric Auger)

 - Use io_remap_pfn_range() (Jason Gunthorpe)

 - Allow fallback support for NVLink on POWER8 (Alexey Kardashevskiy)

 - Enable mdev request interrupt with CCW support (Eric Farman)

 - Enable interface to iommu_domain from vfio_group (Lu Baolu)

* tag 'vfio-v5.11-rc1' of git://github.com/awilliam/linux-vfio:
  vfio/type1: Add vfio_group_iommu_domain()
  vfio-ccw: Wire in the request callback
  vfio-mdev: Wire in a request handler for mdev parent
  vfio/pci/nvlink2: Do not attempt NPU2 setup on POWER8NVL NPU
  vfio-pci: Use io_remap_pfn_range() for PCI IO memory
  vfio/pci: Move dummy_resources_list init in vfio_pci_probe()
parents de925e2f bdfae1c9
@@ -394,6 +394,7 @@ static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
         switch (info->index) {
         case VFIO_CCW_IO_IRQ_INDEX:
         case VFIO_CCW_CRW_IRQ_INDEX:
+        case VFIO_CCW_REQ_IRQ_INDEX:
                 info->count = 1;
                 info->flags = VFIO_IRQ_INFO_EVENTFD;
                 break;
@@ -424,6 +425,9 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
         case VFIO_CCW_CRW_IRQ_INDEX:
                 ctx = &private->crw_trigger;
                 break;
+        case VFIO_CCW_REQ_IRQ_INDEX:
+                ctx = &private->req_trigger;
+                break;
         default:
                 return -EINVAL;
         }
@@ -607,6 +611,27 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
         }
 }
 
+/* Request removal of the device */
+static void vfio_ccw_mdev_request(struct mdev_device *mdev, unsigned int count)
+{
+        struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+        if (!private)
+                return;
+
+        if (private->req_trigger) {
+                if (!(count % 10))
+                        dev_notice_ratelimited(mdev_dev(private->mdev),
+                                               "Relaying device request to user (#%u)\n",
+                                               count);
+
+                eventfd_signal(private->req_trigger, 1);
+        } else if (count == 0) {
+                dev_notice(mdev_dev(private->mdev),
+                           "No device request channel registered, blocked until released by user\n");
+        }
+}
+
 static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
         .owner = THIS_MODULE,
         .supported_type_groups = mdev_type_groups,
@@ -617,6 +642,7 @@ static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
         .read = vfio_ccw_mdev_read,
         .write = vfio_ccw_mdev_write,
         .ioctl = vfio_ccw_mdev_ioctl,
+        .request = vfio_ccw_mdev_request,
 };
 
 int vfio_ccw_mdev_reg(struct subchannel *sch)
......
@@ -84,7 +84,10 @@ struct vfio_ccw_crw {
  * @irb: irb info received from interrupt
  * @scsw: scsw info
  * @io_trigger: eventfd ctx for signaling userspace I/O results
+ * @crw_trigger: eventfd ctx for signaling userspace CRW information
+ * @req_trigger: eventfd ctx for signaling userspace to return device
  * @io_work: work for deferral process of I/O handling
+ * @crw_work: work for deferral process of CRW handling
  */
 struct vfio_ccw_private {
         struct subchannel *sch;
@@ -108,6 +111,7 @@ struct vfio_ccw_private {
         struct eventfd_ctx *io_trigger;
         struct eventfd_ctx *crw_trigger;
+        struct eventfd_ctx *req_trigger;
         struct work_struct io_work;
         struct work_struct crw_work;
 } __aligned(8);
......
@@ -154,6 +154,10 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
         if (!dev)
                 return -EINVAL;
 
+        /* Not mandatory, but its absence could be a problem */
+        if (!ops->request)
+                dev_info(dev, "Driver cannot be asked to release device\n");
+
         mutex_lock(&parent_list_lock);
 
         /* Check for duplicate */
......
@@ -98,6 +98,18 @@ static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
         return parent->ops->mmap(mdev, vma);
 }
 
+static void vfio_mdev_request(void *device_data, unsigned int count)
+{
+        struct mdev_device *mdev = device_data;
+        struct mdev_parent *parent = mdev->parent;
+
+        if (parent->ops->request)
+                parent->ops->request(mdev, count);
+        else if (count == 0)
+                dev_notice(mdev_dev(mdev),
+                           "No mdev vendor driver request callback support, blocked until released by user\n");
+}
+
 static const struct vfio_device_ops vfio_mdev_dev_ops = {
         .name = "vfio-mdev",
         .open = vfio_mdev_open,
@@ -106,6 +118,7 @@ static const struct vfio_device_ops vfio_mdev_dev_ops = {
         .read = vfio_mdev_read,
         .write = vfio_mdev_write,
         .mmap = vfio_mdev_mmap,
+        .request = vfio_mdev_request,
 };
 
 static int vfio_mdev_probe(struct device *dev)
......
@@ -161,8 +161,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
         int i;
         struct vfio_pci_dummy_resource *dummy_res;
 
-        INIT_LIST_HEAD(&vdev->dummy_resources_list);
-
         for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                 int bar = i + PCI_STD_RESOURCES;
 
@@ -1635,8 +1633,8 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
 
         mutex_unlock(&vdev->vma_lock);
 
-        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                            vma->vm_end - vma->vm_start, vma->vm_page_prot))
+        if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                               vma->vm_end - vma->vm_start, vma->vm_page_prot))
                 ret = VM_FAULT_SIGBUS;
 
 up_out:
@@ -1966,6 +1964,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         mutex_init(&vdev->igate);
         spin_lock_init(&vdev->irqlock);
         mutex_init(&vdev->ioeventfds_lock);
+        INIT_LIST_HEAD(&vdev->dummy_resources_list);
         INIT_LIST_HEAD(&vdev->ioeventfds_list);
         mutex_init(&vdev->vma_lock);
         INIT_LIST_HEAD(&vdev->vma_list);
......
@@ -231,7 +231,7 @@ int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
                 return -EINVAL;
 
         if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
-                return -EINVAL;
+                return -ENODEV;
 
         mem_node = of_find_node_by_phandle(mem_phandle);
         if (!mem_node)
@@ -393,7 +393,7 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
         int ret;
         struct vfio_pci_npu2_data *data;
         struct device_node *nvlink_dn;
-        u32 nvlink_index = 0;
+        u32 nvlink_index = 0, mem_phandle = 0;
         struct pci_dev *npdev = vdev->pdev;
         struct device_node *npu_node = pci_device_to_OF_node(npdev);
         struct pci_controller *hose = pci_bus_to_host(npdev->bus);
@@ -408,6 +408,9 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
         if (!pnv_pci_get_gpu_dev(vdev->pdev))
                 return -ENODEV;
 
+        if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
+                return -ENODEV;
+
         /*
          * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
          * so we can allocate one register per link, using nvlink index as
......
@@ -2331,6 +2331,24 @@ int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
 }
 EXPORT_SYMBOL(vfio_unregister_notifier);
 
+struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
+{
+        struct vfio_container *container;
+        struct vfio_iommu_driver *driver;
+
+        if (!group)
+                return ERR_PTR(-EINVAL);
+
+        container = group->container;
+        driver = container->iommu_driver;
+        if (likely(driver && driver->ops->group_iommu_domain))
+                return driver->ops->group_iommu_domain(container->iommu_data,
+                                                       group->iommu_group);
+
+        return ERR_PTR(-ENOTTY);
+}
+EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
+
 /**
  * Module/class support
  */
......
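Not part of the merge: a minimal, hypothetical in-kernel consumer of the new vfio_group_iommu_domain() export, sketched only to show the calling convention. demo_resolve_iova() is an invented name, and the sketch assumes the caller was handed a VFIO group file descriptor by userspace; it pairs the pre-existing vfio_group_get_external_user()/vfio_group_put_external_user() helpers with the new export and the generic iommu_iova_to_phys().

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

/* Hypothetical: translate one IOVA through the domain backing a VFIO group. */
static int demo_resolve_iova(struct file *group_file, dma_addr_t iova,
                             phys_addr_t *phys)
{
        struct vfio_group *group;
        struct iommu_domain *domain;

        group = vfio_group_get_external_user(group_file);
        if (IS_ERR(group))
                return PTR_ERR(group);

        /* New in this merge: ERR_PTR(-ENOTTY) if the backend lacks support. */
        domain = vfio_group_iommu_domain(group);
        if (IS_ERR(domain)) {
                vfio_group_put_external_user(group);
                return PTR_ERR(domain);
        }

        *phys = iommu_iova_to_phys(domain, iova);
        vfio_group_put_external_user(group);

        return *phys ? 0 : -EFAULT;
}

The ERR_PTR()/IS_ERR() convention mirrors the hunks above: -EINVAL for a NULL group, -ENOTTY when the IOMMU backend does not implement .group_iommu_domain, and -ENODEV from the type1 backend when the group is not attached to any domain.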
@@ -2980,6 +2980,29 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
         return ret;
 }
 
+static struct iommu_domain *
+vfio_iommu_type1_group_iommu_domain(void *iommu_data,
+                                    struct iommu_group *iommu_group)
+{
+        struct iommu_domain *domain = ERR_PTR(-ENODEV);
+        struct vfio_iommu *iommu = iommu_data;
+        struct vfio_domain *d;
+
+        if (!iommu || !iommu_group)
+                return ERR_PTR(-EINVAL);
+
+        mutex_lock(&iommu->lock);
+        list_for_each_entry(d, &iommu->domain_list, next) {
+                if (find_iommu_group(d, iommu_group)) {
+                        domain = d->domain;
+                        break;
+                }
+        }
+        mutex_unlock(&iommu->lock);
+
+        return domain;
+}
+
 static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
         .name = "vfio-iommu-type1",
         .owner = THIS_MODULE,
@@ -2993,6 +3016,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
         .register_notifier = vfio_iommu_type1_register_notifier,
         .unregister_notifier = vfio_iommu_type1_unregister_notifier,
         .dma_rw = vfio_iommu_type1_dma_rw,
+        .group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
 };
 
 static int __init vfio_iommu_type1_init(void)
......
@@ -72,6 +72,9 @@ struct device *mdev_get_iommu_device(struct device *dev);
  * @mmap: mmap callback
  *		@mdev: mediated device structure
  *		@vma: vma structure
+ * @request: request callback to release device
+ *		@mdev: mediated device structure
+ *		@count: request sequence number
  * Parent device that support mediated device should be registered with mdev
  * module with mdev_parent_ops structure.
  **/
@@ -92,6 +95,7 @@ struct mdev_parent_ops {
         long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
                       unsigned long arg);
         int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
+        void (*request)(struct mdev_device *mdev, unsigned int count);
 };
 
 /* interface for exporting mdev supported type attributes */
......
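Also not part of the merge: a sketch of how an mdev vendor driver might wire up the new .request callback. The demo_* names are invented; the pattern simply mirrors vfio_ccw_mdev_request() above, relaying the request to an eventfd the driver recorded when userspace registered one via VFIO_DEVICE_SET_IRQS.

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/mdev.h>
#include <linux/module.h>

/* Hypothetical per-device state a vendor driver would keep. */
struct demo_mdev_state {
        struct eventfd_ctx *req_trigger;        /* filled in from the driver's SET_IRQS path */
};

/* Relay "please give the device back" to userspace, if a channel was registered. */
static void demo_mdev_request(struct mdev_device *mdev, unsigned int count)
{
        struct demo_mdev_state *state = mdev_get_drvdata(mdev);

        if (state && state->req_trigger)
                eventfd_signal(state->req_trigger, 1);
        else if (count == 0)
                dev_notice(mdev_dev(mdev),
                           "no request eventfd registered, device held until released\n");
}

static const struct mdev_parent_ops demo_mdev_ops = {
        .owner = THIS_MODULE,
        /* .create, .remove, .read, .write, .ioctl, .mmap, ... */
        .request = demo_mdev_request,
};

As the mdev core hunk above shows, registering a parent without .request now logs "Driver cannot be asked to release device", so leaving the callback unset is allowed but discouraged.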
@@ -90,6 +90,8 @@ struct vfio_iommu_driver_ops {
                                   struct notifier_block *nb);
         int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
                       void *data, size_t count, bool write);
+        struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
+                                                   struct iommu_group *group);
 };
 
 extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
@@ -126,6 +128,8 @@ extern int vfio_group_unpin_pages(struct vfio_group *group,
 extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
                        void *data, size_t len, bool write);
 
+extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group);
+
 /* each type has independent events */
 enum vfio_notify_type {
         VFIO_IOMMU_NOTIFY = 0,
......
@@ -820,6 +820,7 @@ enum {
 enum {
         VFIO_CCW_IO_IRQ_INDEX,
         VFIO_CCW_CRW_IRQ_INDEX,
+        VFIO_CCW_REQ_IRQ_INDEX,
         VFIO_CCW_NUM_IRQS
 };
......
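Finally, a hedged userspace sketch of how a vfio-ccw user might consume the new VFIO_CCW_REQ_IRQ_INDEX. register_req_irq() is an invented helper and `device` is assumed to be an already-open vfio-ccw device file descriptor; the registration itself is the standard VFIO_DEVICE_SET_IRQS / struct vfio_irq_set eventfd dance.

#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

/* Hypothetical: register an eventfd on the new request IRQ index. */
static int register_req_irq(int device)
{
        size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int);
        struct vfio_irq_set *set;
        int efd, ret;

        efd = eventfd(0, EFD_CLOEXEC);
        if (efd < 0)
                return -1;

        set = calloc(1, argsz);
        if (!set) {
                close(efd);
                return -1;
        }

        set->argsz = argsz;
        set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        set->index = VFIO_CCW_REQ_IRQ_INDEX;
        set->start = 0;
        set->count = 1;
        memcpy(set->data, &efd, sizeof(int));

        ret = ioctl(device, VFIO_DEVICE_SET_IRQS, set);
        free(set);
        if (ret) {
                close(efd);
                return -1;
        }

        /* Poll efd; a successful read means the kernel is asking for the device back. */
        return efd;
}

When the eventfd fires (vfio_ccw_mdev_request() signals it on every request and logs every tenth one), the expected reaction is to stop using and close the device so the pending unbind can make progress; otherwise the kernel side stays blocked, as the log messages in the hunks above indicate.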