Commit 38a5e3fb authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'vfio-v5.18-rc3' of https://github.com/awilliam/linux-vfio

Pull vfio fix from Alex Williamson:

 - Fix VF token checking for vfio-pci variant drivers (Jason Gunthorpe)

* tag 'vfio-v5.18-rc3' of https://github.com/awilliam/linux-vfio:
  vfio/pci: Fix vf_token mechanism when device-specific VF drivers are used
parents 62345e48 1ef3342a
...@@ -36,6 +36,10 @@ static bool nointxmask; ...@@ -36,6 +36,10 @@ static bool nointxmask;
static bool disable_vga; static bool disable_vga;
static bool disable_idle_d3; static bool disable_idle_d3;
/* List of PF's that vfio_pci_core_sriov_configure() has been called on */
static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
static LIST_HEAD(vfio_pci_sriov_pfs);
static inline bool vfio_vga_disabled(void) static inline bool vfio_vga_disabled(void)
{ {
#ifdef CONFIG_VFIO_PCI_VGA #ifdef CONFIG_VFIO_PCI_VGA
...@@ -434,47 +438,17 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev) ...@@ -434,47 +438,17 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
} }
EXPORT_SYMBOL_GPL(vfio_pci_core_disable); EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
/*
 * Look up the vfio_pci_core_device of the PF that owns this VF.
 *
 * Returns NULL when @vdev is not a VF, when the PF is not bound to a vfio
 * device, or when the PF and VF are bound to different PCI drivers (i.e. the
 * PF is not managed by the same vfio-pci driver, so its vf_token state cannot
 * be trusted — NOTE(review): this driver-equality check is what breaks for
 * device-specific VF variant drivers, which this commit replaces with an
 * explicit sriov_pf_core_dev back-pointer).
 *
 * On success the caller holds a reference on the returned device's embedded
 * vfio_device (taken via vfio_device_get_from_dev) and must drop it with
 * vfio_device_put(&pf_vdev->vdev).
 */
static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
{
struct pci_dev *physfn = pci_physfn(vdev->pdev);
struct vfio_device *pf_dev;
/* Only VFs have a PF to resolve. */
if (!vdev->pdev->is_virtfn)
return NULL;
/* Takes a reference on the PF's vfio_device, if one is registered. */
pf_dev = vfio_device_get_from_dev(&physfn->dev);
if (!pf_dev)
return NULL;
/* PF bound to a different driver: its core-device layout is unknown. */
if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
vfio_device_put(pf_dev);
return NULL;
}
/* Reference transferred to the caller via the embedded vdev member. */
return container_of(pf_dev, struct vfio_pci_core_device, vdev);
}
/*
 * Adjust the PF's vf_token user count when a VF is opened (@val = 1) or
 * closed (@val = -1).
 *
 * Silently does nothing when the PF cannot be resolved (not a VF, PF not
 * bound to vfio, or driver mismatch — see get_pf_vdev()). The user count is
 * read under the same vf_token->lock when validating tokens, so a non-zero
 * count forces PF opens to present the matching token.
 */
static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
{
struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
if (!pf_vdev)
return;
mutex_lock(&pf_vdev->vf_token->lock);
pf_vdev->vf_token->users += val;
/* users going negative means an unbalanced open/close pairing. */
WARN_ON(pf_vdev->vf_token->users < 0);
mutex_unlock(&pf_vdev->vf_token->lock);
/* Drop the reference taken by get_pf_vdev(). */
vfio_device_put(&pf_vdev->vdev);
}
void vfio_pci_core_close_device(struct vfio_device *core_vdev) void vfio_pci_core_close_device(struct vfio_device *core_vdev)
{ {
struct vfio_pci_core_device *vdev = struct vfio_pci_core_device *vdev =
container_of(core_vdev, struct vfio_pci_core_device, vdev); container_of(core_vdev, struct vfio_pci_core_device, vdev);
vfio_pci_vf_token_user_add(vdev, -1); if (vdev->sriov_pf_core_dev) {
mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
vdev->sriov_pf_core_dev->vf_token->users--;
mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
}
vfio_spapr_pci_eeh_release(vdev->pdev); vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_core_disable(vdev); vfio_pci_core_disable(vdev);
...@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev) ...@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
{ {
vfio_pci_probe_mmaps(vdev); vfio_pci_probe_mmaps(vdev);
vfio_spapr_pci_eeh_open(vdev->pdev); vfio_spapr_pci_eeh_open(vdev->pdev);
vfio_pci_vf_token_user_add(vdev, 1);
if (vdev->sriov_pf_core_dev) {
mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
vdev->sriov_pf_core_dev->vf_token->users++;
mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
}
} }
EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable); EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
...@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, ...@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
* *
* If the VF token is provided but unused, an error is generated. * If the VF token is provided but unused, an error is generated.
*/ */
if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
return 0; /* No VF token provided or required */
if (vdev->pdev->is_virtfn) { if (vdev->pdev->is_virtfn) {
struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev); struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
bool match; bool match;
if (!pf_vdev) { if (!pf_vdev) {
...@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, ...@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
} }
if (!vf_token) { if (!vf_token) {
vfio_device_put(&pf_vdev->vdev);
pci_info_ratelimited(vdev->pdev, pci_info_ratelimited(vdev->pdev,
"VF token required to access device\n"); "VF token required to access device\n");
return -EACCES; return -EACCES;
...@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, ...@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
match = uuid_equal(uuid, &pf_vdev->vf_token->uuid); match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
mutex_unlock(&pf_vdev->vf_token->lock); mutex_unlock(&pf_vdev->vf_token->lock);
vfio_device_put(&pf_vdev->vdev);
if (!match) { if (!match) {
pci_info_ratelimited(vdev->pdev, pci_info_ratelimited(vdev->pdev,
"Incorrect VF token provided for device\n"); "Incorrect VF token provided for device\n");
...@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb, ...@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev) static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
{ {
struct pci_dev *pdev = vdev->pdev; struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_core_device *cur;
struct pci_dev *physfn;
int ret; int ret;
if (pdev->is_virtfn) {
/*
* If this VF was created by our vfio_pci_core_sriov_configure()
* then we can find the PF vfio_pci_core_device now, and due to
* the locking in pci_disable_sriov() it cannot change until
* this VF device driver is removed.
*/
physfn = pci_physfn(vdev->pdev);
mutex_lock(&vfio_pci_sriov_pfs_mutex);
list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
if (cur->pdev == physfn) {
vdev->sriov_pf_core_dev = cur;
break;
}
}
mutex_unlock(&vfio_pci_sriov_pfs_mutex);
return 0;
}
/* Not a SRIOV PF */
if (!pdev->is_physfn) if (!pdev->is_physfn)
return 0; return 0;
...@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, ...@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
INIT_LIST_HEAD(&vdev->ioeventfds_list); INIT_LIST_HEAD(&vdev->ioeventfds_list);
mutex_init(&vdev->vma_lock); mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list); INIT_LIST_HEAD(&vdev->vma_list);
INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock); init_rwsem(&vdev->memory_lock);
} }
EXPORT_SYMBOL_GPL(vfio_pci_core_init_device); EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
...@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) ...@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{ {
struct pci_dev *pdev = vdev->pdev; struct pci_dev *pdev = vdev->pdev;
pci_disable_sriov(pdev); vfio_pci_core_sriov_configure(pdev, 0);
vfio_unregister_group_dev(&vdev->vdev); vfio_unregister_group_dev(&vdev->vdev);
...@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected); ...@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{ {
struct vfio_pci_core_device *vdev;
struct vfio_device *device; struct vfio_device *device;
int ret = 0; int ret = 0;
device_lock_assert(&pdev->dev);
device = vfio_device_get_from_dev(&pdev->dev); device = vfio_device_get_from_dev(&pdev->dev);
if (!device) if (!device)
return -ENODEV; return -ENODEV;
if (nr_virtfn == 0) vdev = container_of(device, struct vfio_pci_core_device, vdev);
pci_disable_sriov(pdev);
else if (nr_virtfn) {
mutex_lock(&vfio_pci_sriov_pfs_mutex);
/*
* The thread that adds the vdev to the list is the only thread
* that gets to call pci_enable_sriov() and we will only allow
* it to be called once without going through
* pci_disable_sriov()
*/
if (!list_empty(&vdev->sriov_pfs_item)) {
ret = -EINVAL;
goto out_unlock;
}
list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
mutex_unlock(&vfio_pci_sriov_pfs_mutex);
ret = pci_enable_sriov(pdev, nr_virtfn); ret = pci_enable_sriov(pdev, nr_virtfn);
if (ret)
goto out_del;
ret = nr_virtfn;
goto out_put;
}
vfio_device_put(device); pci_disable_sriov(pdev);
return ret < 0 ? ret : nr_virtfn; out_del:
mutex_lock(&vfio_pci_sriov_pfs_mutex);
list_del_init(&vdev->sriov_pfs_item);
out_unlock:
mutex_unlock(&vfio_pci_sriov_pfs_mutex);
out_put:
vfio_device_put(device);
return ret;
} }
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure); EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
......
...@@ -133,6 +133,8 @@ struct vfio_pci_core_device { ...@@ -133,6 +133,8 @@ struct vfio_pci_core_device {
struct mutex ioeventfds_lock; struct mutex ioeventfds_lock;
struct list_head ioeventfds_list; struct list_head ioeventfds_list;
struct vfio_pci_vf_token *vf_token; struct vfio_pci_vf_token *vf_token;
struct list_head sriov_pfs_item;
struct vfio_pci_core_device *sriov_pf_core_dev;
struct notifier_block nb; struct notifier_block nb;
struct mutex vma_lock; struct mutex vma_lock;
struct list_head vma_list; struct list_head vma_list;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment