Commit 35c99ffa authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

 - enable packed ring support for s390

 - several fixes

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio/s390: enable packed ring
  virtio/s390: DMA support for virtio-ccw
  virtio/s390: use vring_create_virtqueue
  virtio/virtio_ring: do some comment fixes
  vhost-scsi: remove incorrect memory barrier
  tools/virtio/ringtest: Remove bogus definition of BUG_ON()
  virtio_ring: Fix potential mem leak in virtqueue_add_indirect_packed
parents 81413776 050f4c4d
drivers/s390/virtio/virtio_ccw.c
@@ -66,6 +66,7 @@ struct virtio_ccw_device {
 	bool device_lost;
 	unsigned int config_ready;
 	void *airq_info;
+	u64 dma_mask;
 };
 
 struct vq_info_block_legacy {
@@ -108,7 +109,6 @@ struct virtio_rev_info {
 struct virtio_ccw_vq_info {
 	struct virtqueue *vq;
 	int num;
-	void *queue;
 	union {
 		struct vq_info_block s;
 		struct vq_info_block_legacy l;
@@ -423,7 +423,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
 	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
 	struct virtio_ccw_vq_info *info = vq->priv;
 	unsigned long flags;
-	unsigned long size;
 	int ret;
 	unsigned int index = vq->index;
@@ -461,8 +460,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
 		 ret, index);
 	vring_del_virtqueue(vq);
-	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
-	free_pages_exact(info->queue, size);
 	kfree(info->info_block);
 	kfree(info);
 }
@@ -494,8 +491,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
 	int err;
 	struct virtqueue *vq = NULL;
 	struct virtio_ccw_vq_info *info;
-	unsigned long size = 0; /* silence the compiler */
+	u64 queue;
 	unsigned long flags;
+	bool may_reduce;
 
 	/* Allocate queue. */
 	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
@@ -516,37 +514,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
 		err = info->num;
 		goto out_err;
 	}
-	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
-	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-	if (info->queue == NULL) {
-		dev_warn(&vcdev->cdev->dev, "no queue\n");
-		err = -ENOMEM;
-		goto out_err;
-	}
-	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
-				 true, ctx, info->queue, virtio_ccw_kvm_notify,
-				 callback, name);
+	may_reduce = vcdev->revision > 0;
+	vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
+				    vdev, true, may_reduce, ctx,
+				    virtio_ccw_kvm_notify, callback, name);
 	if (!vq) {
 		/* For now, we fail if we can't get the requested size. */
 		dev_warn(&vcdev->cdev->dev, "no vq\n");
 		err = -ENOMEM;
 		goto out_err;
 	}
+	/* it may have been reduced */
+	info->num = virtqueue_get_vring_size(vq);
 
 	/* Register it with the host. */
+	queue = virtqueue_get_desc_addr(vq);
 	if (vcdev->revision == 0) {
-		info->info_block->l.queue = (__u64)info->queue;
+		info->info_block->l.queue = queue;
 		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
 		info->info_block->l.index = i;
 		info->info_block->l.num = info->num;
 		ccw->count = sizeof(info->info_block->l);
 	} else {
-		info->info_block->s.desc = (__u64)info->queue;
+		info->info_block->s.desc = queue;
 		info->info_block->s.index = i;
 		info->info_block->s.num = info->num;
-		info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
-		info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+		info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
+		info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
 		ccw->count = sizeof(info->info_block->s);
 	}
 	ccw->cmd_code = CCW_CMD_SET_VQ;
@@ -572,8 +567,6 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
 	if (vq)
 		vring_del_virtqueue(vq);
 	if (info) {
-		if (info->queue)
-			free_pages_exact(info->queue, size);
 		kfree(info->info_block);
 	}
 	kfree(info);
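Note: with vring_create_virtqueue() the virtio core allocates the ring itself, so the transport no longer open-codes alloc_pages_exact()/vring_new_virtqueue() and no longer frees the ring on the error path. A minimal sketch of the new calling pattern; my_create_vq and my_notify are hypothetical names, not part of this commit:

    /* Sketch: let the core allocate the ring, possibly smaller than asked. */
    static struct virtqueue *my_create_vq(struct virtio_device *vdev, int index,
                                          int num, bool ctx,
                                          void (*callback)(struct virtqueue *),
                                          const char *name)
    {
            struct virtqueue *vq;

            /* args 5/6: weak_barriers=true, may_reduce=true (core may shrink
             * the ring if the requested size cannot be allocated) */
            vq = vring_create_virtqueue(index, num, KVM_VIRTIO_CCW_RING_ALIGN,
                                        vdev, true, true, ctx,
                                        my_notify, callback, name);
            /* on success, virtqueue_get_vring_size(vq) reports the real size */
            return vq;
    }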
@@ -780,12 +773,8 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
 static void ccw_transport_features(struct virtio_device *vdev)
 {
 	/*
-	 * Packed ring isn't enabled on virtio_ccw for now,
-	 * because virtio_ccw uses some legacy accessors,
-	 * e.g. virtqueue_get_avail() and virtqueue_get_used()
-	 * which aren't available in packed ring currently.
+	 * Currently nothing to do here.
 	 */
-	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
 }
 
 static int virtio_ccw_finalize_features(struct virtio_device *vdev)
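Note: dropping the __virtio_clear_bit() call is what actually enables packed ring on s390; the transport-features hook is where a transport masks ring features it cannot support. A sketch of that masking pattern, with a hypothetical transport name and condition:

    /* Sketch only: mask a ring feature this transport cannot handle. */
    static void my_transport_features(struct virtio_device *vdev)
    {
            if (!my_transport_supports_packed)      /* hypothetical flag */
                    __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
    }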
@@ -1266,6 +1255,16 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 		ret = -ENOMEM;
 		goto out_free;
 	}
+	vcdev->vdev.dev.parent = &cdev->dev;
+	cdev->dev.dma_mask = &vcdev->dma_mask;
+	/* we are fine with common virtio infrastructure using 64 bit DMA */
+	ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
+		goto out_free;
+	}
+
 	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
 				      GFP_DMA | GFP_KERNEL);
 	if (!vcdev->config_block) {
@@ -1280,7 +1279,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
-	vcdev->vdev.dev.parent = &cdev->dev;
 	vcdev->vdev.dev.release = virtio_ccw_release_dev;
 	vcdev->vdev.config = &virtio_ccw_config_ops;
 	vcdev->cdev = cdev;
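Note: CCW devices do not come with a DMA mask set up, so the driver first points cdev->dev.dma_mask at storage it owns (the new vcdev->dma_mask field) before declaring 64-bit capability. For comparison, the common probe-time idiom elsewhere in the kernel adds a 32-bit fallback; this patch simply fails instead. A sketch of that idiom, the fallback being hypothetical here:

    /* Sketch: usual DMA-mask negotiation at probe time. */
    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
    if (ret) {
            /* hypothetical fallback; virtio-ccw bails out instead */
            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
            if (ret)
                    return ret;
    }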

drivers/vhost/scsi.c
@@ -1443,7 +1443,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 			tpg->tv_tpg_vhost_count++;
 			tpg->vhost_scsi = vs;
 			vs_tpg[tpg->tport_tpgt] = tpg;
-			smp_mb__after_atomic();
 			match = true;
 		}
 		mutex_unlock(&tpg->tv_tpg_mutex);
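Note: smp_mb__after_atomic() is only meaningful after a non-value-returning atomic RMW; the stores above are plain assignments made under tpg->tv_tpg_mutex, whose unlock already orders them, so the barrier was misused as well as redundant. The intended usage pattern looks like this (obj and its refcnt are hypothetical):

    atomic_inc(&obj->refcnt);       /* atomic RMW without a return value */
    smp_mb__after_atomic();         /* upgrade to a full barrier, pairing
                                     * with a barrier on the reader side */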

drivers/virtio/virtio_ring.c
@@ -1004,6 +1004,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	if (unlikely(vq->vq.num_free < 1)) {
 		pr_debug("Can't add buf len 1 - avail = 0\n");
+		kfree(desc);
 		END_USE(vq);
 		return -ENOSPC;
 	}
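Note: in virtqueue_add_indirect_packed() the indirect descriptor table (desc) is allocated before the free-slot check, so the early -ENOSPC return leaked it. A condensed sketch of the fixed shape; the real function allocates via a helper, but the pattern is the same:

    desc = kmalloc_array(total_sg, sizeof(*desc), gfp);
    if (!desc)
            return -ENOMEM;

    if (unlikely(vq->vq.num_free < 1)) {
            kfree(desc);            /* error paths free what they allocated */
            END_USE(vq);
            return -ENOSPC;
    }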
@@ -1718,10 +1719,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 /**
  * virtqueue_add_sgs - expose buffers to other end
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  * @sgs: array of terminated scatterlists.
- * @out_num: the number of scatterlists readable by other side
- * @in_num: the number of scatterlists which are writable (after readable ones)
+ * @out_sgs: the number of scatterlists readable by other side
+ * @in_sgs: the number of scatterlists which are writable (after readable ones)
  * @data: the token identifying the buffer.
  * @gfp: how to do memory allocations (if necessary).
  *
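Note: the corrected names match the actual signature, virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, data, gfp). A usage sketch with hypothetical request and buffer names (req, buf, len):

    struct scatterlist hdr, payload, *sgs[2];

    sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));  /* device-readable */
    sg_init_one(&payload, buf, len);                 /* device-writable */
    sgs[0] = &hdr;
    sgs[1] = &payload;

    err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);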
@@ -1821,7 +1822,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
 /**
  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
  *
  * Instead of virtqueue_kick(), you can do:
  *	if (virtqueue_kick_prepare(vq))
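Note: splitting the kick lets a driver drop its lock before the potentially slow host notification. A sketch of that batching pattern; my_submit, the lock, and the token are hypothetical:

    /* Sketch: add under the lock, notify the host outside it. */
    static void my_submit(struct virtqueue *vq, struct scatterlist *sg,
                          void *token, spinlock_t *lock)
    {
            unsigned long flags;
            int err;
            bool kick;

            spin_lock_irqsave(lock, flags);
            err = virtqueue_add_outbuf(vq, sg, 1, token, GFP_ATOMIC);
            kick = !err && virtqueue_kick_prepare(vq);
            spin_unlock_irqrestore(lock, flags);

            if (kick)
                    virtqueue_notify(vq);   /* may trap to the hypervisor */
    }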
@@ -1841,7 +1842,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
 /**
  * virtqueue_notify - second half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
  *
  * This does not need to be serialized.
 *
@@ -1885,8 +1886,9 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
 /**
  * virtqueue_get_buf - get the next used buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  * @len: the length written into the buffer
+ * @ctx: extra context for the token
 *
  * If the device wrote data into the buffer, @len will be set to the
  * amount written. This means you don't need to clear the buffer
@@ -1916,7 +1918,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
 
 /**
  * virtqueue_disable_cb - disable callbacks
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * Note that this is not necessarily synchronous, hence unreliable and only
  * useful as an optimization.
@@ -1936,7 +1938,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 /**
  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks; it returns current queue state
  * in an opaque unsigned value. This value should be later tested by
@@ -1957,7 +1959,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
 /**
  * virtqueue_poll - query pending used buffers
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
  *
  * Returns "true" if there are pending used buffers in the queue.
@@ -1976,7 +1978,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
 /**
  * virtqueue_enable_cb - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks; it returns "false" if there are pending
  * buffers in the queue, to detect a possible race between the driver
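Note: the disable_cb/enable_cb pair supports the usual interrupt-mitigation loop; virtqueue_enable_cb() returning false means buffers arrived while callbacks were off, which closes the race the comment mentions. A sketch of the loop; my_drain and my_process are hypothetical:

    /* Sketch: drain completions, then re-arm callbacks race-free. */
    static void my_drain(struct virtqueue *vq)
    {
            unsigned int len;
            void *buf;

            for (;;) {
                    virtqueue_disable_cb(vq);
                    while ((buf = virtqueue_get_buf(vq, &len)))
                            my_process(buf, len);   /* hypothetical */
                    if (virtqueue_enable_cb(vq))
                            break;  /* nothing pending; callbacks re-armed */
            }
    }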
@@ -1995,7 +1997,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 /**
  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks but hints to the other side to delay
  * interrupts until most of the available buffers have been processed;
@@ -2017,7 +2019,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
 /**
  * virtqueue_detach_unused_buf - detach first unused buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * Returns NULL or the "data" token handed to virtqueue_add_*().
  * This is not valid on an active queue; it is useful only for device
@@ -2249,7 +2251,7 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
 /**
  * virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
+ * @_vq: the struct virtqueue containing the vring of interest.
  *
  * Returns the size of the vring. This is mainly used for boasting to
  * userspace. Unlike other operations, this need not be serialized.

include/linux/virtio.h
@@ -90,23 +90,6 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
 dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
 
-/*
- * Legacy accessors -- in almost all cases, these are the wrong functions
- * to use.
- */
-static inline void *virtqueue_get_desc(struct virtqueue *vq)
-{
-	return virtqueue_get_vring(vq)->desc;
-}
-
-static inline void *virtqueue_get_avail(struct virtqueue *vq)
-{
-	return virtqueue_get_vring(vq)->avail;
-}
-
-static inline void *virtqueue_get_used(struct virtqueue *vq)
-{
-	return virtqueue_get_vring(vq)->used;
-}
-
 /**
  * virtio_device - representation of a device using virtio
  * @index: unique position on the virtio bus
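Note: the removed helpers returned CPU pointers into the split-ring layout and so could never describe a packed ring; the surviving virtqueue_get_*_addr() accessors instead return the dma_addr_t the core recorded when it created the ring. Callers convert as virtio-ccw now does, for example:

    /* bus address of the available ring, obtained from the core */
    info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);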

tools/virtio/ringtest/main.h
@@ -18,7 +18,6 @@
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
 #define SIZE_MAX (~(size_t)0)
 #define KMALLOC_MAX_SIZE SIZE_MAX
-#define BUG_ON(x) assert(x)
 
 typedef pthread_spinlock_t spinlock_t;
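Note: besides being unused, the removed stand-in had inverted semantics; in the kernel, BUG_ON(x) fires when x is true, so a userspace equivalent would have to negate the condition. A sketch of what a correct definition would look like, if one were ever needed:

    #include <assert.h>

    /* Abort when the condition IS true, matching kernel BUG_ON(). */
    #define BUG_ON(x) assert(!(x))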