Commit 803a7ab7 authored by Hans Verkuil, committed by Mauro Carvalho Chehab

media: v4l2-mem2mem: add vb2_m2m_request_queue

For mem2mem devices we have to make sure that v4l2_m2m_try_schedule()
is called whenever a request is queued.

We do that by creating a vb2_m2m_request_queue() helper that should
be used instead of the 'normal' vb2_request_queue() helper. The m2m
helper function will call v4l2_m2m_try_schedule() as needed.
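
As an aside (illustration only, not part of this patch): a request-capable
m2m driver is expected to hook the new helper into its media device
operations. A minimal sketch, assuming a hypothetical "foo" driver with an
embedded struct media_device mdev; vb2_request_validate() is the stock
validation helper from the request API:

	static const struct media_device_ops foo_media_ops = {
		.req_validate = vb2_request_validate,
		.req_queue = vb2_m2m_request_queue,
	};

	/* in probe(), before media_device_register(&foo->mdev) */
	foo->mdev.ops = &foo_media_ops;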

In addition, v4l2_m2m_prepare_buf() and v4l2_m2m_qbuf() no longer call
v4l2_m2m_try_schedule() for a buffer that is part of a request, since that
is no longer needed: the new helper will do it when the request itself is
queued.
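
Seen from user space, the flow this enables looks roughly as follows
(sketch only, not part of this patch; error handling omitted, and media_fd,
video_fd and a filled-in struct v4l2_buffer buf are assumed to be set up
already):

	int req_fd;

	/* allocate a request from the media device */
	ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);

	/* queue an output buffer to the request: this only prepares
	 * the buffer, no job is scheduled yet */
	buf.flags |= V4L2_BUF_FLAG_REQUEST_FD;
	buf.request_fd = req_fd;
	ioctl(video_fd, VIDIOC_QBUF, &buf);

	/* queueing the request ends up in vb2_m2m_request_queue(), which
	 * queues the buffer objects and calls v4l2_m2m_try_schedule() */
	ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);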
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Reviewed-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
parent 9ac8090f
@@ -387,7 +387,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 		if (m2m_dev->m2m_ops->job_abort)
 			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
-		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
 		wait_event(m2m_ctx->finished,
 			   !(m2m_ctx->job_flags & TRANS_RUNNING));
 	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -478,8 +478,14 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+		dprintk("%s: requests cannot be used with capture buffers\n",
+			__func__);
+		return -EPERM;
+	}
 	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
-	if (!ret)
+	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
 		v4l2_m2m_try_schedule(m2m_ctx);
 
 	return ret;
@@ -501,14 +507,9 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 {
 	struct video_device *vdev = video_devdata(file);
 	struct vb2_queue *vq;
-	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
-	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
-	if (!ret)
-		v4l2_m2m_try_schedule(m2m_ctx);
-
-	return ret;
+	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
@@ -952,6 +953,52 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
+void vb2_m2m_request_queue(struct media_request *req)
+{
+	struct media_request_object *obj, *obj_safe;
+	struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+	/*
+	 * Queue all objects. Note that buffer objects are at the end of the
+	 * objects list, after all other object types. Once buffer objects
+	 * are queued, the driver might delete them immediately (if the driver
+	 * processes the buffer at once), so we have to use
+	 * list_for_each_entry_safe() to handle the case where the object we
+	 * queue is deleted.
+	 */
+	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+		struct v4l2_m2m_ctx *m2m_ctx_obj;
+		struct vb2_buffer *vb;
+
+		if (!obj->ops->queue)
+			continue;
+
+		if (vb2_request_object_is_buffer(obj)) {
+			/* Sanity checks */
+			vb = container_of(obj, struct vb2_buffer, req_obj);
+			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+			m2m_ctx_obj = container_of(vb->vb2_queue,
+						   struct v4l2_m2m_ctx,
+						   out_q_ctx.q);
+			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+			m2m_ctx = m2m_ctx_obj;
+		}
+
+		/*
+		 * The buffer we queue here can in theory be immediately
+		 * unbound, hence the use of list_for_each_entry_safe()
+		 * above and why we call the queue op last.
+		 */
+		obj->ops->queue(obj);
+	}
+
+	WARN_ON(!m2m_ctx);
+
+	if (m2m_ctx)
+		v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+
 /* Videobuf2 ioctl helpers */
 
 int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
...
@@ -622,6 +622,10 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
 	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
 }
 
+/* v4l2 request helper */
+
+void vb2_m2m_request_queue(struct media_request *req);
+
 /* v4l2 ioctl helpers */
 
 int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
...
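
For completeness, a hedged sketch (hypothetical "foo" driver names, not
from this patch) of what a request-aware device_run() does once the job
made runnable by vb2_m2m_request_queue() is scheduled: it picks up the
queued buffer and applies the controls carried by its request.

	static void foo_device_run(void *priv)
	{
		struct foo_ctx *ctx = priv;
		struct vb2_v4l2_buffer *src;

		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);

		/* apply the controls that were set in this buffer's request */
		v4l2_ctrl_request_setup(src->vb2_buf.req_obj.req,
					&ctx->ctrl_handler);

		/* ... program the hardware and start processing; on
		 * completion the driver calls v4l2_m2m_job_finish() ... */
	}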