Diffstat (limited to 'drivers/media/v4l2-core/v4l2-mem2mem.c')
 -rw-r--r--	drivers/media/v4l2-core/v4l2-mem2mem.c | 280
 1 file changed, 245 insertions(+), 35 deletions(-)
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 438ea45d107..178ce96556c 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -41,6 +41,8 @@ module_param(debug, bool, 0644);
 #define TRANS_QUEUED		(1 << 0)
 /* Instance is currently running in hardware */
 #define TRANS_RUNNING		(1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT		(1 << 2)
 
 
 /* Offset base for buffers on the destination queue - used to distinguish
@@ -62,7 +64,7 @@ struct v4l2_m2m_dev {
 	struct list_head	job_queue;
 	spinlock_t		job_spinlock;
 
-	struct v4l2_m2m_ops *m2m_ops;
+	const struct v4l2_m2m_ops *m2m_ops;
 };
 
 static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
@@ -196,6 +198,10 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
  * 2) at least one destination buffer has to be queued,
  * 3) streaming has to be on.
  *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
  * There may also be additional, custom requirements. In such case the driver
  * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
  * return 1 if the instance is ready.
@@ -205,7 +211,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
 static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 {
 	struct v4l2_m2m_dev *m2m_dev;
-	unsigned long flags_job, flags;
+	unsigned long flags_job, flags_out, flags_cap;
 
 	m2m_dev = m2m_ctx->m2m_dev;
 	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -217,26 +223,42 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 	}
 
 	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+	/* If the context is aborted then don't schedule it */
+	if (m2m_ctx->job_flags & TRANS_ABORT) {
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+		dprintk("Aborted context\n");
+		return;
+	}
+
 	if (m2m_ctx->job_flags & TRANS_QUEUED) {
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("On job queue already\n");
 		return;
 	}
 
-	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
-	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
-		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+	    && !m2m_ctx->out_q_ctx.buffered) {
+		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+					flags_out);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("No input buffers available\n");
 		return;
 	}
-	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
-		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+	    && !m2m_ctx->cap_q_ctx.buffered) {
+		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
+					flags_cap);
+		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+					flags_out);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("No output buffers available\n");
 		return;
 	}
-	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 
 	if (m2m_dev->m2m_ops->job_ready
 	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -254,6 +276,41 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 }
 
 /**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ *
+ * In case of streamoff or release called on any context,
+ * 1] If the context is currently running, then abort job will be called
+ * 2] If the context is queued, then the context will be removed from
+ *    the job_queue
+ */
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
+{
+	struct v4l2_m2m_dev *m2m_dev;
+	unsigned long flags;
+
+	m2m_dev = m2m_ctx->m2m_dev;
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+	m2m_ctx->job_flags |= TRANS_ABORT;
+	if (m2m_ctx->job_flags & TRANS_RUNNING) {
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+		wait_event(m2m_ctx->finished,
+				!(m2m_ctx->job_flags & TRANS_RUNNING));
+	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+		list_del(&m2m_ctx->queue);
+		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+		dprintk("m2m_ctx: %p had been on queue and was removed\n",
+			m2m_ctx);
+	} else {
+		/* Do nothing, was not on queue/running */
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+	}
+}
+
+/**
  * v4l2_m2m_job_finish() - inform the framework that a job has been finished
  * and have it clean up
  *
@@ -369,6 +426,20 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
 /**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ */
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+			 struct v4l2_create_buffers *create)
+{
+	struct vb2_queue *vq;
+
+	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
+	return vb2_create_bufs(vq, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
+
+/**
  * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
  * the type
  */
@@ -405,10 +476,40 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		       enum v4l2_buf_type type)
 {
-	struct vb2_queue *vq;
+	struct v4l2_m2m_dev *m2m_dev;
+	struct v4l2_m2m_queue_ctx *q_ctx;
+	unsigned long flags_job, flags;
+	int ret;
 
-	vq = v4l2_m2m_get_vq(m2m_ctx, type);
-	return vb2_streamoff(vq, type);
+	/* wait until the current context is dequeued from job_queue */
+	v4l2_m2m_cancel_job(m2m_ctx);
+
+	q_ctx = get_queue_ctx(m2m_ctx, type);
+	ret = vb2_streamoff(&q_ctx->q, type);
+	if (ret)
+		return ret;
+
+	m2m_dev = m2m_ctx->m2m_dev;
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+	/* We should not be scheduled anymore, since we're dropping a queue. */
+	if (m2m_ctx->job_flags & TRANS_QUEUED)
+		list_del(&m2m_ctx->queue);
+	m2m_ctx->job_flags = 0;
+
+	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+	/* Drop queue, since streamoff returns device to the same state as after
+	 * calling reqbufs. */
+	INIT_LIST_HEAD(&q_ctx->rdy_queue);
+	q_ctx->num_rdy = 0;
+	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+	if (m2m_dev->curr_ctx == m2m_ctx) {
+		m2m_dev->curr_ctx = NULL;
+		wake_up(&m2m_ctx->finished);
+	}
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
 
@@ -457,12 +558,18 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 
 	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
 		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+	else if (m2m_ctx->q_lock)
+		mutex_unlock(m2m_ctx->q_lock);
 
-	poll_wait(file, &src_q->done_wq, wait);
-	poll_wait(file, &dst_q->done_wq, wait);
+	if (list_empty(&src_q->done_list))
+		poll_wait(file, &src_q->done_wq, wait);
+	if (list_empty(&dst_q->done_list))
+		poll_wait(file, &dst_q->done_wq, wait);
 
 	if (m2m_ctx->m2m_dev->m2m_ops->lock)
 		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+	else if (m2m_ctx->q_lock)
+		mutex_lock(m2m_ctx->q_lock);
 
 	spin_lock_irqsave(&src_q->done_lock, flags);
 	if (!list_empty(&src_q->done_list))
@@ -519,7 +626,7 @@ EXPORT_SYMBOL(v4l2_m2m_mmap);
  *
  * Usually called from driver's probe() function.
  */
-struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
+struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
 {
 	struct v4l2_m2m_dev *m2m_dev;
 
@@ -590,6 +697,13 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 	if (ret)
 		goto err;
+	/*
+	 * If both queues use same mutex assign it as the common buffer
+	 * queues lock to the m2m context. This lock is used in the
+	 * v4l2_m2m_ioctl_* helpers.
+	 */
+	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
+		m2m_ctx->q_lock = out_q_ctx->q.lock;
 
 	return m2m_ctx;
 err:
@@ -605,27 +719,8 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
  */
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 {
-	struct v4l2_m2m_dev *m2m_dev;
-	unsigned long flags;
-
-	m2m_dev = m2m_ctx->m2m_dev;
-
-	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
-	if (m2m_ctx->job_flags & TRANS_RUNNING) {
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
-		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
-		wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
-	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
-		list_del(&m2m_ctx->queue);
-		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-		dprintk("m2m_ctx: %p had been on queue and was removed\n",
-			m2m_ctx);
-	} else {
-		/* Do nothing, was not on queue/running */
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-	}
+	/* wait until the current context is dequeued from job_queue */
+	v4l2_m2m_cancel_job(m2m_ctx);
 
 	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
 	vb2_queue_release(&m2m_ctx->out_q_ctx.q);
@@ -656,3 +751,118 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+				struct v4l2_requestbuffers *rb)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+				struct v4l2_create_buffers *create)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+				struct v4l2_exportbuffer *eb)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+				enum v4l2_buf_type type)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type type)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here same lock is used
+ * for the output and the capture buffer queue.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct v4l2_fh *fh = file->private_data;
+	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+	int ret;
+
+	if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
+		return -ERESTARTSYS;
+
+	ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
+
+	if (m2m_ctx->q_lock)
+		mutex_unlock(m2m_ctx->q_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+	struct v4l2_fh *fh = file->private_data;
+	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+	unsigned int ret;
+
+	if (m2m_ctx->q_lock)
+		mutex_lock(m2m_ctx->q_lock);
+
+	ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+	if (m2m_ctx->q_lock)
+		mutex_unlock(m2m_ctx->q_lock);
+
+	return ret;
+}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
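
The v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers added at the end of this patch let a mem2mem driver route the buffer ioctls and the poll/mmap file operations straight into the framework instead of writing one-line wrappers itself. A minimal sketch of the wiring, assuming a hypothetical driver whose open() stores a struct v4l2_fh (with a valid m2m_ctx) in file->private_data; my_ioctl_ops, my_fops and the elided callbacks are placeholder names, not part of this patch:

	#include <linux/module.h>
	#include <media/v4l2-ioctl.h>
	#include <media/v4l2-mem2mem.h>

	static const struct v4l2_ioctl_ops my_ioctl_ops = {
		/* querycap and the format ioctls remain driver-specific */
		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
	};

	static const struct v4l2_file_operations my_fops = {
		.owner		= THIS_MODULE,
		/* .open and .release stay driver-specific: they create and
		 * destroy the m2m context the helpers reach through
		 * file->private_data. */
		.poll		= v4l2_m2m_fop_poll,
		.mmap		= v4l2_m2m_fop_mmap,
		.unlocked_ioctl	= video_ioctl2,
	};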
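The fop helpers only serialize on m2m_ctx->q_lock, and v4l2_m2m_ctx_init() only records that lock, when the driver points both vb2 queues at the same mutex. Below is a sketch of the queue_init callback passed to v4l2_m2m_ctx_init() under that assumption; my_queue_init, my_ctx, my_qops and dev_mutex are hypothetical names, and the memops choice is illustrative. The per-queue 'buffered' flag tested in v4l2_m2m_try_schedule() is set from the v4l2-mem2mem.h side, which this diffstat-limited view does not show.

	#include <media/v4l2-mem2mem.h>
	#include <media/videobuf2-dma-contig.h>

	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
				 struct vb2_queue *dst_vq)
	{
		struct my_ctx *ctx = priv;	/* hypothetical per-file context */
		int ret;

		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
		src_vq->drv_priv = ctx;
		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
		src_vq->ops = &my_qops;			/* hypothetical vb2_ops */
		src_vq->mem_ops = &vb2_dma_contig_memops;
		src_vq->lock = &ctx->dev->dev_mutex;	/* one mutex on both queues, */

		ret = vb2_queue_init(src_vq);
		if (ret)
			return ret;

		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
		dst_vq->drv_priv = ctx;
		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
		dst_vq->ops = &my_qops;
		dst_vq->mem_ops = &vb2_dma_contig_memops;
		dst_vq->lock = &ctx->dev->dev_mutex;	/* so m2m_ctx->q_lock is set */

		return vb2_queue_init(dst_vq);
	}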
