Commit 911ea8ec authored by Pi-Hsun Shih's avatar Pi-Hsun Shih Committed by Mauro Carvalho Chehab

media: v4l2-mem2mem: add v4l2_m2m_suspend, v4l2_m2m_resume

Add two functions that can be used to stop new jobs from being queued /
continue running queued jobs. These can be used while a driver using the m2m
helper is about to suspend / has woken up from resume, and they ensure that
no job is running during the suspend process.
Signed-off-by: default avatarPi-Hsun Shih <pihsun@chromium.org>
Signed-off-by: default avatarJerry-ch Chen <jerry-ch.chen@mediatek.corp-partner.google.com>
Reviewed-by: default avatarTomasz Figa <tfiga@chromium.org>
Signed-off-by: default avatarHans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: default avatarMauro Carvalho Chehab <mchehab+huawei@kernel.org>
parent 1847f68e
...@@ -43,6 +43,10 @@ module_param(debug, bool, 0644); ...@@ -43,6 +43,10 @@ module_param(debug, bool, 0644);
#define TRANS_ABORT (1 << 2) #define TRANS_ABORT (1 << 2)
/* The job queue is not running new jobs */
#define QUEUE_PAUSED (1 << 0)
/* Offset base for buffers on the destination queue - used to distinguish /* Offset base for buffers on the destination queue - used to distinguish
* between source and destination buffers when mmapping - they receive the same * between source and destination buffers when mmapping - they receive the same
* offsets but for different queues */ * offsets but for different queues */
...@@ -84,6 +88,7 @@ static const char * const m2m_entity_name[] = { ...@@ -84,6 +88,7 @@ static const char * const m2m_entity_name[] = {
* @job_queue: instances queued to run * @job_queue: instances queued to run
* @job_spinlock: protects job_queue * @job_spinlock: protects job_queue
* @job_work: worker to run queued jobs. * @job_work: worker to run queued jobs.
* @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
* @m2m_ops: driver callbacks * @m2m_ops: driver callbacks
*/ */
struct v4l2_m2m_dev { struct v4l2_m2m_dev {
...@@ -101,6 +106,7 @@ struct v4l2_m2m_dev { ...@@ -101,6 +106,7 @@ struct v4l2_m2m_dev {
struct list_head job_queue; struct list_head job_queue;
spinlock_t job_spinlock; spinlock_t job_spinlock;
struct work_struct job_work; struct work_struct job_work;
unsigned long job_queue_flags;
const struct v4l2_m2m_ops *m2m_ops; const struct v4l2_m2m_ops *m2m_ops;
}; };
...@@ -263,6 +269,12 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) ...@@ -263,6 +269,12 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
return; return;
} }
if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Running new jobs is paused\n");
return;
}
m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
struct v4l2_m2m_ctx, queue); struct v4l2_m2m_ctx, queue);
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
...@@ -504,6 +516,7 @@ void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, ...@@ -504,6 +516,7 @@ void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
if (WARN_ON(!src_buf || !dst_buf)) if (WARN_ON(!src_buf || !dst_buf))
goto unlock; goto unlock;
v4l2_m2m_buf_done(src_buf, state);
dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
if (!dst_buf->is_held) { if (!dst_buf->is_held) {
v4l2_m2m_dst_buf_remove(m2m_ctx); v4l2_m2m_dst_buf_remove(m2m_ctx);
...@@ -528,6 +541,34 @@ void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, ...@@ -528,6 +541,34 @@ void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
} }
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish); EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
/*
 * Pause the job queue and wait for the currently running job, if any, to
 * finish. While QUEUE_PAUSED is set, v4l2_m2m_try_run() refuses to start
 * new jobs, so after this returns no job is running on the device.
 *
 * NOTE(review): curr_ctx is snapshotted under job_spinlock, but the
 * wait_event() below runs after the lock is dropped — this assumes the
 * context stays valid for the duration of the wait (i.e. no context
 * teardown races with suspend); confirm against the framework's ctx
 * release path.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	struct v4l2_m2m_ctx *curr_ctx;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	/* Stop the scheduler from picking up new jobs. */
	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
	/* Snapshot the in-flight context while still holding the lock. */
	curr_ctx = m2m_dev->curr_ctx;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* Wait (unlocked) for the running job to drop TRANS_RUNNING. */
	if (curr_ctx)
		wait_event(curr_ctx->finished,
			   !(curr_ctx->job_flags & TRANS_RUNNING));
}
EXPORT_SYMBOL(v4l2_m2m_suspend);
/*
 * Undo v4l2_m2m_suspend(): clear the pause flag under the job spinlock,
 * then kick the scheduler so any job that was queued while the device was
 * suspended gets a chance to run.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, irq_flags);
	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, irq_flags);

	/* A job may have been queued while paused — try to run it now. */
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_resume);
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs) struct v4l2_requestbuffers *reqbufs)
{ {
......
...@@ -304,6 +304,28 @@ v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx, ...@@ -304,6 +304,28 @@ v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf); struct vb2_v4l2_buffer *vbuf);
/**
* v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
* to finish
*
* @m2m_dev: opaque pointer to the internal data to handle M2M context
*
 * Called by a driver in its suspend hook. Stops new jobs from being run, and
 * waits for the currently running job, if any, to finish.
*/
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);
/**
* v4l2_m2m_resume() - resume job running and try to run a queued job
*
* @m2m_dev: opaque pointer to the internal data to handle M2M context
*
 * Called by a driver in its resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows jobs to be run again. It also tries to run a
 * queued job if there is one.
 */
*/
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
/** /**
* v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment