media: v4l2-mem2mem: add v4l2_m2m_suspend, v4l2_m2m_resume
Add two functions that can be used to stop new jobs from being queued and to continue running queued jobs. A driver using the m2m helpers can call them when it is about to suspend and when it wakes up on resume, to ensure that no job is still running during the suspend process.

Signed-off-by: Pi-Hsun Shih <pihsun@chromium.org>
Signed-off-by: Jerry-ch Chen <jerry-ch.chen@mediatek.corp-partner.google.com>
Reviewed-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
This commit is contained in:
parent 1847f68e39
commit 911ea8ec42
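The new helpers are intended to be called from a driver's system PM callbacks. Below is a minimal sketch of that wiring; the foo_* names and struct foo_dev are hypothetical placeholders, not part of this commit or of any existing driver, and it assumes the driver stored its private structure with dev_set_drvdata() at probe time.

/*
 * Hypothetical driver glue, for illustration only.
 */
#include <linux/device.h>
#include <linux/pm.h>
#include <media/v4l2-mem2mem.h>

struct foo_dev {
        struct v4l2_m2m_dev *m2m_dev;
};

static int __maybe_unused foo_suspend(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        /* Stop scheduling new jobs and wait for the running one to finish. */
        v4l2_m2m_suspend(foo->m2m_dev);
        return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        /* Clear the paused state and try to run the next queued job, if any. */
        v4l2_m2m_resume(foo->m2m_dev);
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

Because v4l2_m2m_suspend() waits on the running job with wait_event(), it may sleep; that is fine in the system suspend path, which runs in process context.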
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -43,6 +43,10 @@ module_param(debug, bool, 0644);
 #define TRANS_ABORT		(1 << 2)
 
 
+/* The job queue is not running new jobs */
+#define QUEUE_PAUSED			(1 << 0)
+
+
 /* Offset base for buffers on the destination queue - used to distinguish
  * between source and destination buffers when mmapping - they receive the same
  * offsets but for different queues */
@@ -84,6 +88,7 @@ static const char * const m2m_entity_name[] = {
  * @job_queue: instances queued to run
  * @job_spinlock: protects job_queue
  * @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
  * @m2m_ops: driver callbacks
  */
 struct v4l2_m2m_dev {
@@ -101,6 +106,7 @@ struct v4l2_m2m_dev {
 	struct list_head	job_queue;
 	spinlock_t		job_spinlock;
 	struct work_struct	job_work;
+	unsigned long		job_queue_flags;
 
 	const struct v4l2_m2m_ops *m2m_ops;
 };
@@ -263,6 +269,12 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
 		return;
 	}
 
+	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+		dprintk("Running new jobs is paused\n");
+		return;
+	}
+
 	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
 					     struct v4l2_m2m_ctx, queue);
 	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
@@ -504,6 +516,7 @@ void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
 
 	if (WARN_ON(!src_buf || !dst_buf))
 		goto unlock;
+	v4l2_m2m_buf_done(src_buf, state);
 	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
 	if (!dst_buf->is_held) {
 		v4l2_m2m_dst_buf_remove(m2m_ctx);
@@ -528,6 +541,34 @@ unlock:
 }
 EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
 
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
+{
+	unsigned long flags;
+	struct v4l2_m2m_ctx *curr_ctx;
+
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
+	curr_ctx = m2m_dev->curr_ctx;
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+	if (curr_ctx)
+		wait_event(curr_ctx->finished,
+			   !(curr_ctx->job_flags & TRANS_RUNNING));
+}
+EXPORT_SYMBOL(v4l2_m2m_suspend);
+
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+	v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_resume);
+
 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		     struct v4l2_requestbuffers *reqbufs)
 {
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -304,6 +304,28 @@ v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
 void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
 			       struct vb2_v4l2_buffer *vbuf);
 
+/**
+ * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
+ * to finish
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Called by a driver in the suspend hook. Stop new jobs from being run, and
+ * wait for current running job to finish.
+ */
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);
+
+/**
+ * v4l2_m2m_resume() - resume job running and try to run a queued job
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Called by a driver in the resume hook. This reverts the operation of
+ * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if
+ * there is any.
+ */
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
+
 /**
  * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
  *