Commit 64056a94 authored by Linus Torvalds

Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull virtio updates from Rusty Russell:
 "Nothing exciting: virtio-blk users might see a bit of a boost from the
  doubling of the default queue length though"

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  virtio-blk: base queue-depth on virtqueue ringsize or module param
  Revert a02bbb1c: MAINTAINERS: add virtio-dev ML for virtio
  virtio: fail adding buffer on broken queues.
  virtio-rng: don't crash if virtqueue is broken.
  virtio_balloon: don't crash if virtqueue is broken.
  virtio_blk: don't crash, report error if virtqueue is broken.
  virtio_net: don't crash if virtqueue is broken.
  virtio_balloon: don't softlockup on huge balloon changes.
  virtio: Use pci_enable_msix_exact() instead of pci_enable_msix()
  MAINTAINERS: virtio-dev is subscribers only
  tools/virtio: add a missing )
  tools/virtio: fix missing kmemleak_ignore symbol
  tools/virtio: update internal copies of headers
parents 7474043e fc4324b4
...@@ -9432,7 +9432,6 @@ F: include/media/videobuf2-* ...@@ -9432,7 +9432,6 @@ F: include/media/videobuf2-*
VIRTIO CONSOLE DRIVER VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com> M: Amit Shah <amit.shah@redhat.com>
L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org L: virtualization@lists.linux-foundation.org
S: Maintained S: Maintained
F: drivers/char/virtio_console.c F: drivers/char/virtio_console.c
...@@ -9442,7 +9441,6 @@ F: include/uapi/linux/virtio_console.h ...@@ -9442,7 +9441,6 @@ F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS VIRTIO CORE, NET AND BLOCK DRIVERS
M: Rusty Russell <rusty@rustcorp.com.au> M: Rusty Russell <rusty@rustcorp.com.au>
M: "Michael S. Tsirkin" <mst@redhat.com> M: "Michael S. Tsirkin" <mst@redhat.com>
L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org L: virtualization@lists.linux-foundation.org
S: Maintained S: Maintained
F: drivers/virtio/ F: drivers/virtio/
...@@ -9455,7 +9453,6 @@ F: include/uapi/linux/virtio_*.h ...@@ -9455,7 +9453,6 @@ F: include/uapi/linux/virtio_*.h
VIRTIO HOST (VHOST) VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com> M: "Michael S. Tsirkin" <mst@redhat.com>
L: kvm@vger.kernel.org L: kvm@vger.kernel.org
L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
S: Maintained S: Maintained
......
...@@ -158,6 +158,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) ...@@ -158,6 +158,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
unsigned long flags; unsigned long flags;
unsigned int num; unsigned int num;
const bool last = (req->cmd_flags & REQ_END) != 0; const bool last = (req->cmd_flags & REQ_END) != 0;
int err;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
...@@ -198,11 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) ...@@ -198,11 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
} }
spin_lock_irqsave(&vblk->vq_lock, flags); spin_lock_irqsave(&vblk->vq_lock, flags);
if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) { err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vq); virtqueue_kick(vblk->vq);
spin_unlock_irqrestore(&vblk->vq_lock, flags); spin_unlock_irqrestore(&vblk->vq_lock, flags);
blk_mq_stop_hw_queue(hctx); blk_mq_stop_hw_queue(hctx);
return BLK_MQ_RQ_QUEUE_BUSY; /* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
return BLK_MQ_RQ_QUEUE_BUSY;
return BLK_MQ_RQ_QUEUE_ERROR;
} }
if (last) if (last)
...@@ -485,10 +491,11 @@ static struct blk_mq_ops virtio_mq_ops = { ...@@ -485,10 +491,11 @@ static struct blk_mq_ops virtio_mq_ops = {
static struct blk_mq_reg virtio_mq_reg = { static struct blk_mq_reg virtio_mq_reg = {
.ops = &virtio_mq_ops, .ops = &virtio_mq_ops,
.nr_hw_queues = 1, .nr_hw_queues = 1,
.queue_depth = 64, .queue_depth = 0, /* Set in virtblk_probe */
.numa_node = NUMA_NO_NODE, .numa_node = NUMA_NO_NODE,
.flags = BLK_MQ_F_SHOULD_MERGE, .flags = BLK_MQ_F_SHOULD_MERGE,
}; };
module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
struct request *rq, unsigned int nr) struct request *rq, unsigned int nr)
...@@ -553,6 +560,13 @@ static int virtblk_probe(struct virtio_device *vdev) ...@@ -553,6 +560,13 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vq; goto out_free_vq;
} }
/* Default queue sizing is to fill the ring. */
if (!virtio_mq_reg.queue_depth) {
virtio_mq_reg.queue_depth = vblk->vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
virtio_mq_reg.queue_depth /= 2;
}
virtio_mq_reg.cmd_size = virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) + sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems; sizeof(struct scatterlist) * sg_elems;
......
...@@ -47,8 +47,7 @@ static void register_buffer(u8 *buf, size_t size) ...@@ -47,8 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size); sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */ /* There should always be room for one buffer. */
if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0) virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
BUG();
virtqueue_kick(vq); virtqueue_kick(vq);
} }
......
...@@ -938,7 +938,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, ...@@ -938,7 +938,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sgs[out_num] = &stat; sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0); virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
if (unlikely(!virtqueue_kick(vi->cvq))) if (unlikely(!virtqueue_kick(vi->cvq)))
return status == VIRTIO_NET_OK; return status == VIRTIO_NET_OK;
......
...@@ -108,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) ...@@ -108,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns); sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
/* We should always be able to add one buffer to an empty queue. */ /* We should always be able to add one buffer to an empty queue. */
if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0) virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
BUG();
virtqueue_kick(vq); virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */ /* When host has read buffer, this completes via balloon_ack */
...@@ -258,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb) ...@@ -258,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
if (!virtqueue_get_buf(vq, &len)) if (!virtqueue_get_buf(vq, &len))
return; return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats)); sg_init_one(&sg, vb->stats, sizeof(vb->stats));
if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0) virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
BUG();
virtqueue_kick(vq); virtqueue_kick(vq);
} }
...@@ -310,6 +308,12 @@ static int balloon(void *_vballoon) ...@@ -310,6 +308,12 @@ static int balloon(void *_vballoon)
else if (diff < 0) else if (diff < 0)
leak_balloon(vb, -diff); leak_balloon(vb, -diff);
update_balloon_size(vb); update_balloon_size(vb);
/*
* For large balloon changes, we could spend a lot of time
* and always have work to do. Be nice if preempt disabled.
*/
cond_resched();
} }
return 0; return 0;
} }
...@@ -338,7 +342,7 @@ static int init_vqs(struct virtio_balloon *vb) ...@@ -338,7 +342,7 @@ static int init_vqs(struct virtio_balloon *vb)
/* /*
* Prime this virtqueue with one buffer so the hypervisor can * Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later. * use it to signal us later (it can't be broken yet!).
*/ */
sg_init_one(&sg, vb->stats, sizeof vb->stats); sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
......
...@@ -333,10 +333,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, ...@@ -333,10 +333,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
for (i = 0; i < nvectors; ++i) for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i; vp_dev->msix_entries[i].entry = i;
/* pci_enable_msix returns positive if we can't get this many. */ err = pci_enable_msix_exact(vp_dev->pci_dev,
err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); vp_dev->msix_entries, nvectors);
if (err > 0)
err = -ENOSPC;
if (err) if (err)
goto error; goto error;
vp_dev->msix_enabled = 1; vp_dev->msix_enabled = 1;
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/hrtimer.h> #include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#ifdef DEBUG #ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */ /* For development, we want to crash whenever the ring is screwed. */
...@@ -203,6 +204,11 @@ static inline int virtqueue_add(struct virtqueue *_vq, ...@@ -203,6 +204,11 @@ static inline int virtqueue_add(struct virtqueue *_vq,
BUG_ON(data == NULL); BUG_ON(data == NULL);
if (unlikely(vq->broken)) {
END_USE(vq);
return -EIO;
}
#ifdef DEBUG #ifdef DEBUG
{ {
ktime_t now = ktime_get(); ktime_t now = ktime_get();
...@@ -309,7 +315,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, ...@@ -309,7 +315,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
* Caller must ensure we don't call this with other virtqueue operations * Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted). * at the same time (except where noted).
* *
* Returns zero or a negative error (ie. ENOSPC, ENOMEM). * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/ */
int virtqueue_add_sgs(struct virtqueue *_vq, int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[], struct scatterlist *sgs[],
...@@ -347,7 +353,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs); ...@@ -347,7 +353,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
* Caller must ensure we don't call this with other virtqueue operations * Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted). * at the same time (except where noted).
* *
* Returns zero or a negative error (ie. ENOSPC, ENOMEM). * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/ */
int virtqueue_add_outbuf(struct virtqueue *vq, int virtqueue_add_outbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num, struct scatterlist sg[], unsigned int num,
...@@ -369,7 +375,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); ...@@ -369,7 +375,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
* Caller must ensure we don't call this with other virtqueue operations * Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted). * at the same time (except where noted).
* *
* Returns zero or a negative error (ie. ENOSPC, ENOMEM). * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/ */
int virtqueue_add_inbuf(struct virtqueue *vq, int virtqueue_add_inbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num, struct scatterlist sg[], unsigned int num,
......
static inline void kmemleak_ignore(const void *ptr)
{
}
...@@ -63,7 +63,7 @@ int virtqueue_add_inbuf(struct virtqueue *vq, ...@@ -63,7 +63,7 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
void *data, void *data,
gfp_t gfp); gfp_t gfp);
void virtqueue_kick(struct virtqueue *vq); bool virtqueue_kick(struct virtqueue *vq);
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
...@@ -79,7 +79,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, ...@@ -79,7 +79,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
struct virtio_device *vdev, struct virtio_device *vdev,
bool weak_barriers, bool weak_barriers,
void *pages, void *pages,
void (*notify)(struct virtqueue *vq), bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq),
const char *name); const char *name);
void vring_del_virtqueue(struct virtqueue *vq); void vring_del_virtqueue(struct virtqueue *vq);
......
...@@ -172,7 +172,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq, ...@@ -172,7 +172,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
GFP_ATOMIC); GFP_ATOMIC);
if (likely(r == 0)) { if (likely(r == 0)) {
++started; ++started;
if (unlikely(!virtqueue_kick(vq->vq)) if (unlikely(!virtqueue_kick(vq->vq)))
r = -1; r = -1;
} }
} else } else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment