Commit ea16c514 authored by Michael S. Tsirkin

vhost: move acked_features to VQs

Refactor the code to make sure features are only accessed
under the VQ mutex. This makes everything simpler: no need
for RCU here anymore.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 98f9ca0a
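
For context, the pattern the diff below adopts can be summarized in a
minimal user-space sketch. The toy structs and pthread mutexes are
illustrative stand-ins for the kernel's vhost structures, and the helper
names merely mirror acked_features/vhost_has_feature() from the diff;
only the locking pattern is the point: the feature bits live in each
queue and are read and written only under that queue's mutex, so no RCU
or explicit write barrier is needed.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

#define VQ_MAX 2
#define F_LOG_ALL 26	/* stand-in for VHOST_F_LOG_ALL */

struct vq {
	pthread_mutex_t mutex;
	unsigned acked_features;	/* protected by mutex */
};

struct dev {
	struct vq vqs[VQ_MAX];
};

/* Caller must hold vq->mutex, like the new vhost_has_feature(). */
static int has_feature(struct vq *vq, int bit)
{
	return vq->acked_features & (1 << bit);
}

/* Mirrors the new set_features loops: take each VQ mutex in turn. */
static void set_features(struct dev *d, unsigned features)
{
	for (int i = 0; i < VQ_MAX; i++) {
		pthread_mutex_lock(&d->vqs[i].mutex);
		d->vqs[i].acked_features = features;
		pthread_mutex_unlock(&d->vqs[i].mutex);
	}
}

int main(void)
{
	struct dev d;

	for (int i = 0; i < VQ_MAX; i++) {
		pthread_mutex_init(&d.vqs[i].mutex, NULL);
		d.vqs[i].acked_features = 0;
	}

	set_features(&d, 1u << F_LOG_ALL);

	pthread_mutex_lock(&d.vqs[0].mutex);
	printf("log_all acked: %d\n", !!has_feature(&d.vqs[0], F_LOG_ALL));
	pthread_mutex_unlock(&d.vqs[0].mutex);
	return 0;
}

Every reader of acked_features already runs with the relevant vq->mutex
held, so storing the bits per queue lets an existing lock cover them;
that is what replaces the rcu_dereference_index_check() read and the
smp_wmb() publication in the old code.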
drivers/vhost/net.c

@@ -585,9 +585,9 @@ static void handle_rx(struct vhost_net *net)
 	vhost_hlen = nvq->vhost_hlen;
 	sock_hlen = nvq->sock_hlen;
 
-	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
 		vq->log : NULL;
-	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
+	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
 	while ((sock_len = peek_head_len(sock->sk))) {
 		sock_len += sock_hlen;
@@ -1051,15 +1051,13 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
 		mutex_unlock(&n->dev.mutex);
 		return -EFAULT;
 	}
-	n->dev.acked_features = features;
-	smp_wmb();
 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
 		mutex_lock(&n->vqs[i].vq.mutex);
+		n->vqs[i].vq.acked_features = features;
 		n->vqs[i].vhost_hlen = vhost_hlen;
 		n->vqs[i].sock_hlen = sock_hlen;
 		mutex_unlock(&n->vqs[i].vq.mutex);
 	}
-	vhost_net_flush(n);
 	mutex_unlock(&n->dev.mutex);
 	return 0;
 }
drivers/vhost/scsi.c

@@ -1373,6 +1373,9 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
+	struct vhost_virtqueue *vq;
+	int i;
+
 	if (features & ~VHOST_SCSI_FEATURES)
 		return -EOPNOTSUPP;
 
@@ -1382,9 +1385,13 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 		mutex_unlock(&vs->dev.mutex);
 		return -EFAULT;
 	}
-	vs->dev.acked_features = features;
-	smp_wmb();
-	vhost_scsi_flush(vs);
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+		vq = &vs->vqs[i].vq;
+		mutex_lock(&vq->mutex);
+		vq->acked_features = features;
+		mutex_unlock(&vq->mutex);
+	}
 	mutex_unlock(&vs->dev.mutex);
 	return 0;
 }
@@ -1591,10 +1598,6 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 		return;
 
 	mutex_lock(&vs->dev.mutex);
-	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
-		mutex_unlock(&vs->dev.mutex);
-		return;
-	}
 
 	if (plug)
 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
@@ -1603,8 +1606,9 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 
 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	mutex_lock(&vq->mutex);
-	tcm_vhost_send_evt(vs, tpg, lun,
-			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
+		tcm_vhost_send_evt(vs, tpg, lun,
+				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
 	mutex_unlock(&vq->mutex);
 	mutex_unlock(&vs->dev.mutex);
 }
drivers/vhost/test.c

@@ -241,15 +241,18 @@ static long vhost_test_reset_owner(struct vhost_test *n)
 
 static int vhost_test_set_features(struct vhost_test *n, u64 features)
 {
+	struct vhost_virtqueue *vq;
+
 	mutex_lock(&n->dev.mutex);
 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
 	    !vhost_log_access_ok(&n->dev)) {
 		mutex_unlock(&n->dev.mutex);
 		return -EFAULT;
 	}
-	n->dev.acked_features = features;
-	smp_wmb();
-	vhost_test_flush(n);
+	vq = &n->vqs[VHOST_TEST_VQ];
+	mutex_lock(&vq->mutex);
+	vq->acked_features = features;
+	mutex_unlock(&vq->mutex);
 	mutex_unlock(&n->dev.mutex);
 	return 0;
 }
drivers/vhost/vhost.c

@@ -191,6 +191,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->log_used = false;
 	vq->log_addr = -1ull;
 	vq->private_data = NULL;
+	vq->acked_features = 0;
 	vq->log_base = NULL;
 	vq->error_ctx = NULL;
 	vq->error = NULL;
@@ -524,11 +525,13 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 
 	for (i = 0; i < d->nvqs; ++i) {
 		int ok;
+		bool log;
+
 		mutex_lock(&d->vqs[i]->mutex);
+		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 		/* If ring is inactive, will check when it's enabled. */
 		if (d->vqs[i]->private_data)
-			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
-						 log_all);
+			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
 		else
 			ok = 1;
 		mutex_unlock(&d->vqs[i]->mutex);
@@ -538,12 +541,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 	return 1;
 }
 
-static int vq_access_ok(struct vhost_dev *d, unsigned int num,
+static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
 			struct vring_desc __user *desc,
 			struct vring_avail __user *avail,
 			struct vring_used __user *used)
 {
-	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 	       access_ok(VERIFY_READ, avail,
 			 sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -565,16 +568,16 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+static int vq_log_access_ok(struct vhost_virtqueue *vq,
 			    void __user *log_base)
 {
 	struct vhost_memory *mp;
-	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
 	mp = rcu_dereference_protected(vq->dev->memory,
 				       lockdep_is_held(&vq->mutex));
 	return vq_memory_access_ok(log_base, mp,
-				   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
+				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
 	       (!vq->log_used || log_access_ok(log_base, vq->log_addr,
 					       sizeof *vq->used +
 					       vq->num * sizeof *vq->used->ring + s));
@@ -584,8 +587,8 @@ static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
-		vq_log_access_ok(vq->dev, vq, vq->log_base);
+	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
+		vq_log_access_ok(vq, vq->log_base);
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
@@ -612,8 +615,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 		return -EFAULT;
 	}
 
-	if (!memory_access_ok(d, newmem,
-			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+	if (!memory_access_ok(d, newmem, 0)) {
 		kfree(newmem);
 		return -EFAULT;
 	}
@@ -726,7 +728,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 		 * If it is not, we don't as size might not have been setup.
 		 * We will verify when backend is configured. */
 		if (vq->private_data) {
-			if (!vq_access_ok(d, vq->num,
+			if (!vq_access_ok(vq, vq->num,
 				(void __user *)(unsigned long)a.desc_user_addr,
 				(void __user *)(unsigned long)a.avail_user_addr,
 				(void __user *)(unsigned long)a.used_user_addr)) {
@@ -866,7 +868,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 			vq = d->vqs[i];
 			mutex_lock(&vq->mutex);
 			/* If ring is inactive, will check when it's enabled. */
-			if (vq->private_data && !vq_log_access_ok(d, vq, base))
+			if (vq->private_data && !vq_log_access_ok(vq, base))
 				r = -EFAULT;
 			else
 				vq->log_base = base;
@@ -1434,11 +1436,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	 * interrupts. */
 	smp_mb();
 
-	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
 		return true;
 
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__u16 flags;
 		if (__get_user(flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
@@ -1499,7 +1501,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
 		return false;
 	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r) {
 			vq_err(vq, "Failed to enable notification at %p: %d\n",
@@ -1536,7 +1538,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
 		return;
 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r)
 			vq_err(vq, "Failed to enable notification at %p: %d\n",
drivers/vhost/vhost.h

@@ -105,6 +105,7 @@ struct vhost_virtqueue {
 	struct vring_used_elem *heads;
 	/* Protected by virtqueue mutex. */
 	void *private_data;
+	unsigned acked_features;
 	/* Log write descriptors */
 	void __user *log_base;
 	struct vhost_log *log;
@@ -117,7 +118,6 @@ struct vhost_dev {
 	struct vhost_memory __rcu *memory;
 	struct mm_struct *mm;
 	struct mutex mutex;
-	unsigned acked_features;
 	struct vhost_virtqueue **vqs;
 	int nvqs;
 	struct file *log_file;
@@ -174,13 +174,8 @@ enum {
 			 (1ULL << VHOST_F_LOG_ALL),
 };
 
-static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
+static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
 {
-	unsigned acked_features;
-
-	/* TODO: check that we are running from vhost_worker or dev mutex is
-	 * held? */
-	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
-	return acked_features & (1 << bit);
+	return vq->acked_features & (1 << bit);
 }
 #endif