Commit 4100a7b8 authored by Gerd Hoffmann

drm/virtio: cleanup queue functions

Make the queue functions return void; none of the
call sites checks the return value.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190813082509.29324-2-kraxel@redhat.com
parent a02c4c25
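For context, the calling convention the patch relies on looks roughly like the sketch below: a command emitter builds a vbuffer, hands it to one of the queue helpers, and never inspects the result. The emitter name (example_cmd_emit) is hypothetical and its body is simplified; only the queue-helper call mirrors the driver.

/* Illustrative sketch only (hypothetical emitter, simplified body).
 * Every real call site follows this shape: the helper's return value
 * was simply dropped, which is why the helpers can safely return void.
 */
static void example_cmd_emit(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_vbuffer *vbuf,
                             struct virtio_gpu_ctrl_hdr *hdr,
                             struct virtio_gpu_fence *fence)
{
        /* ... fill in the command in *vbuf / *hdr here ... */
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, hdr, fence);
        /* no error handling: the result was never checked before this patch */
}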
@@ -252,7 +252,7 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 	wake_up(&vgdev->cursorq.ack_queue);
 }
 
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+						struct virtio_gpu_vbuffer *vbuf)
 	__releases(&vgdev->ctrlq.qlock)
 	__acquires(&vgdev->ctrlq.qlock)
@@ -263,7 +263,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 	int ret;
 
 	if (!vgdev->vqs_ready)
-		return -ENODEV;
+		return;
 
 	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 	sgs[outcnt + incnt] = &vcmd;
@@ -294,30 +294,22 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 		virtqueue_kick(vq);
 	}
-
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
 }
 
-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					 struct virtio_gpu_vbuffer *vbuf)
 {
-	int rc;
-
 	spin_lock(&vgdev->ctrlq.qlock);
-	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
-	return rc;
 }
 
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf,
-					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence *fence)
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+						struct virtio_gpu_vbuffer *vbuf,
+						struct virtio_gpu_ctrl_hdr *hdr,
+						struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
-	int rc;
 
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -338,12 +330,11 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	if (fence)
 		virtio_gpu_fence_emit(vgdev, hdr, fence);
-	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
-	return rc;
 }
 
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
-				   struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
@@ -352,7 +343,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 	int outcnt;
 
 	if (!vgdev->vqs_ready)
-		return -ENODEV;
+		return;
 
 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 	sgs[0] = &ccmd;
@@ -374,10 +365,6 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 	}
 
 	spin_unlock(&vgdev->cursorq.qlock);
-
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
 }
 
 /* just create gem objects for userspace and long lived objects,
...
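One detail the first hunk preserves is the pair of sparse annotations on virtio_gpu_queue_ctrl_buffer_locked: __releases()/__acquires() document that the function is entered and exited with ctrlq.qlock held but may temporarily drop the lock in between (for example, while waiting for ring space). Below is a minimal sketch of that annotation pattern only; the function and parameter names are hypothetical and only the locking shape matches the driver.

/* Minimal sketch of the locking shape that __releases()/__acquires()
 * describe: called with the lock held, may drop it to sleep, and
 * re-acquires it before returning.  Names are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

static void queue_entry_locked(spinlock_t *qlock, wait_queue_head_t *ack_queue,
                               bool *ring_has_space)
        __releases(qlock)
        __acquires(qlock)
{
        while (!*ring_has_space) {
                spin_unlock(qlock);                      /* drop the lock while sleeping */
                wait_event(*ack_queue, *ring_has_space); /* wait for the ring to drain   */
                spin_lock(qlock);                        /* re-take it before returning  */
        }
        /* ... add the buffer to the virtqueue here ... */
}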