Commit 3e732ebf authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "Fixes and cleanups:

   - A couple of mlx5 fixes related to cvq

   - A couple of reverts dropping useless code (code that used it got
     reverted earlier)"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vdpa: mlx5: synchronize driver status with CVQ
  vdpa: mlx5: prevent cvq work from hogging CPU
  Revert "virtio_config: introduce a new .enable_cbs method"
  Revert "virtio: use virtio_device_ready() in virtio_device_restore()"
parents e2a1256b 1c80cf03
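The two mlx5 patches in this pull boil down to one pattern: the control-VQ kick path used to allocate a work entry with kzalloc(GFP_ATOMIC) on every kick, and the kick handler could monopolize its workqueue thread and race with reset. The fixes embed a single preallocated work entry in the device, serialize the handler against reset/teardown with reslock, and make it requeue itself rather than loop. The snippet below is a minimal illustrative sketch of that pattern, not the driver code; every identifier in it (demo_dev, demo_cvq_work_fn, demo_handle_one_ctrl_cmd, the batch budget) is hypothetical.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/virtio_config.h>
#include <linux/workqueue.h>

/* Hypothetical device, standing in for struct mlx5_vdpa_net. */
struct demo_dev {
        struct workqueue_struct *wq;
        struct work_struct cvq_work;    /* preallocated: no kzalloc per kick */
        struct mutex reslock;           /* serializes against reset/teardown */
        u8 status;
        bool cvq_ready;
};

/* Hypothetical helper: handle one control command, false when the ring is empty. */
static bool demo_handle_one_ctrl_cmd(struct demo_dev *d)
{
        return false;
}

static void demo_cvq_work_fn(struct work_struct *work)
{
        struct demo_dev *d = container_of(work, struct demo_dev, cvq_work);
        int budget = 16;        /* arbitrary batch size for this sketch */

        mutex_lock(&d->reslock);

        /* Drop kicks that race with reset: the driver is not DRIVER_OK. */
        if (!(d->status & VIRTIO_CONFIG_S_DRIVER_OK))
                goto out;

        while (demo_handle_one_ctrl_cmd(d)) {
                if (--budget == 0) {
                        /* Yield the workqueue thread instead of hogging it. */
                        queue_work(d->wq, &d->cvq_work);
                        break;
                }
        }
out:
        mutex_unlock(&d->reslock);
}

/* Kick path: may run in atomic context, so it must not allocate anything. */
static void demo_kick_cvq(struct demo_dev *d)
{
        if (d->wq && d->cvq_ready)
                queue_work(d->wq, &d->cvq_work);
}

/* One-time initialization at probe time. */
static int demo_init(struct demo_dev *d)
{
        mutex_init(&d->reslock);
        INIT_WORK(&d->cvq_work, demo_cvq_work_fn);

        d->wq = create_singlethread_workqueue("demo_vdpa_wq");
        if (!d->wq)
                return -ENOMEM;
        return 0;
}

Because the work item lives in the device structure, a kick can never fail with -ENOMEM, and because the handler rechecks DRIVER_OK under reslock, a kick that races with a device reset is simply ignored.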
@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
         u32 cur_num_vqs;
         struct notifier_block nb;
         struct vdpa_callback config_cb;
+        struct mlx5_vdpa_wq_ent cvq_ent;
 };

 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1658,6 +1659,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
         mvdev = wqent->mvdev;
         ndev = to_mlx5_vdpa_ndev(mvdev);
         cvq = &mvdev->cvq;
+
+        mutex_lock(&ndev->reslock);
+
+        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+                goto out;
+
         if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
                 goto out;
@@ -1696,9 +1703,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
                 if (vringh_need_notify_iotlb(&cvq->vring))
                         vringh_notify(&cvq->vring);
+
+                queue_work(mvdev->wq, &wqent->work);
+                break;
         }
+
 out:
-        kfree(wqent);
+        mutex_unlock(&ndev->reslock);
 }

 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1706,7 +1717,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         struct mlx5_vdpa_virtqueue *mvq;
-        struct mlx5_vdpa_wq_ent *wqent;

         if (!is_index_valid(mvdev, idx))
                 return;
@@ -1715,13 +1725,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
         if (!mvdev->wq || !mvdev->cvq.ready)
                 return;

-        wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-        if (!wqent)
-                return;
-
-        wqent->mvdev = mvdev;
-        INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-        queue_work(mvdev->wq, &wqent->work);
+        queue_work(mvdev->wq, &ndev->cvq_ent.work);
         return;
 }
@@ -2180,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
                 goto err_mr;

         if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
-                return 0;
+                goto err_mr;

         restore_channels_info(ndev);
         err = setup_driver(mvdev);
@@ -2195,12 +2199,14 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
         return err;
 }

+/* reslock must be held for this function */
 static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 {
         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         int err;

-        mutex_lock(&ndev->reslock);
+        WARN_ON(!mutex_is_locked(&ndev->reslock));
+
         if (ndev->setup) {
                 mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
                 err = 0;
@@ -2230,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                 goto err_fwd;
         }
         ndev->setup = true;
-        mutex_unlock(&ndev->reslock);

         return 0;
@@ -2241,23 +2246,23 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 err_rqt:
         teardown_virtqueues(ndev);
 out:
-        mutex_unlock(&ndev->reslock);
         return err;
 }

+/* reslock must be held for this function */
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
-        mutex_lock(&ndev->reslock);
+        WARN_ON(!mutex_is_locked(&ndev->reslock));
+
         if (!ndev->setup)
-                goto out;
+                return;

         remove_fwd_to_tir(ndev);
         destroy_tir(ndev);
         destroy_rqt(ndev);
         teardown_virtqueues(ndev);
         ndev->setup = false;
-out:
-        mutex_unlock(&ndev->reslock);
 }

 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2278,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
         print_status(mvdev, status, true);

+        mutex_lock(&ndev->reslock);
+
         if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
                 if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                         err = setup_driver(mvdev);
@@ -2287,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
                         }
                 } else {
                         mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
-                        return;
+                        goto err_clear;
                 }
         }

         ndev->mvdev.status = status;
+        mutex_unlock(&ndev->reslock);
         return;

 err_setup:
         mlx5_vdpa_destroy_mr(&ndev->mvdev);
         ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+        mutex_unlock(&ndev->reslock);
 }

 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2306,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
         print_status(mvdev, 0, true);
         mlx5_vdpa_info(mvdev, "performing device reset\n");
+
+        mutex_lock(&ndev->reslock);
         teardown_driver(ndev);
         clear_vqs_ready(ndev);
         mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2318,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
                 if (mlx5_vdpa_create_mr(mvdev, NULL))
                         mlx5_vdpa_warn(mvdev, "create MR failed\n");
         }
+        mutex_unlock(&ndev->reslock);

         return 0;
 }
@@ -2353,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
 {
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         bool change_map;
         int err;

+        mutex_lock(&ndev->reslock);
+
         err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
         if (err) {
                 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-                return err;
+                goto err;
         }

         if (change_map)
-                return mlx5_vdpa_change_map(mvdev, iotlb);
+                err = mlx5_vdpa_change_map(mvdev, iotlb);

-        return 0;
+err:
+        mutex_unlock(&ndev->reslock);
+        return err;
 }

 static void mlx5_vdpa_free(struct vdpa_device *vdev)
@@ -2740,6 +2758,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
         if (err)
                 goto err_mr;

+        ndev->cvq_ent.mvdev = mvdev;
+        INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
         mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
         if (!mvdev->wq) {
                 err = -ENOMEM;
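The locking rework above follows a common kernel convention: the externally visible vdpa callbacks (set_status, reset, set_map) take reslock once, while internal helpers such as setup_driver() and teardown_driver() only assert that the lock is held, so they can be shared by several locked call paths without a second, recursive lock acquisition. Below is a minimal sketch of that convention only, not the driver code; the names (demo_vdpa_dev, demo_reset, demo_teardown_driver) are made up for illustration.

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

/* Hypothetical device state, standing in for struct mlx5_vdpa_net. */
struct demo_vdpa_dev {
        struct mutex reslock;   /* guards setup/teardown of device resources */
        bool setup;
};

/* reslock must be held by the caller. */
static void demo_teardown_driver(struct demo_vdpa_dev *d)
{
        WARN_ON(!mutex_is_locked(&d->reslock));

        if (!d->setup)
                return;

        /* ... tear down virtqueues, RQT, TIR, steering rules ... */
        d->setup = false;
}

/* Entry point visible to the vdpa core: the only place the lock is taken. */
static int demo_reset(struct demo_vdpa_dev *d)
{
        mutex_lock(&d->reslock);
        demo_teardown_driver(d);
        /* ... clear ring state, rebuild the memory mapping ... */
        mutex_unlock(&d->reslock);
        return 0;
}

Pushing the lock out to the entry points is what lets mlx5_vdpa_set_map() take reslock once and still call into mlx5_vdpa_change_map() and setup_driver() without deadlocking on the lock they would otherwise take themselves.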
...
@@ -526,9 +526,8 @@ int virtio_device_restore(struct virtio_device *dev)
                         goto err;
         }

-        /* If restore didn't do it, mark device DRIVER_OK ourselves. */
-        if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
-                virtio_device_ready(dev);
+        /* Finally, tell the device we're all set */
+        virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);

         virtio_config_enable(dev);
...
@@ -23,8 +23,6 @@ struct virtio_shm_region {
  * any of @get/@set, @get_status/@set_status, or @get_features/
  * @finalize_features are NOT safe to be called from an atomic
  * context.
- * @enable_cbs: enable the callbacks
- *      vdev: the virtio_device
  * @get: read the value of a configuration field
  *      vdev: the virtio_device
  *      offset: the offset of the configuration field
@@ -78,7 +76,6 @@ struct virtio_shm_region {
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
-        void (*enable_cbs)(struct virtio_device *vdev);
         void (*get)(struct virtio_device *vdev, unsigned offset,
                     void *buf, unsigned len);
         void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev)
 {
         unsigned status = dev->config->get_status(dev);

-        if (dev->config->enable_cbs)
-                dev->config->enable_cbs(dev);
-
         BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
         dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }
...