// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Memory-to-memory device framework for Video for Linux 2 and videobuf.
*
* Helper functions for devices that use videobuf buffers for both their
* source and destination.
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <pawel@osciak.com>
* Marek Szyprowski, <m.szyprowski@samsung.com>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);
#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT (1 << 2)
/* Offset base for buffers on the destination queue - used to distinguish
* between source and destination buffers when mmapping - they receive the same
* offsets but for different queues */
#define DST_QUEUE_OFF_BASE (1 << 30)
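/*
 * A sketch of how the framework applies this base (see v4l2_m2m_querybuf()
 * and v4l2_m2m_mmap() further down): the offset is added when reporting
 * CAPTURE buffers to userspace and stripped again before the mmap lookup:
 *
 *	buf->m.offset += DST_QUEUE_OFF_BASE;			// querybuf, CAPTURE queue
 *	...
 *	vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);	// mmap, CAPTURE queue
 */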
enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;

	const struct v4l2_m2m_ops *m2m_ops;
};

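/*
 * get_queue_ctx() - map a buffer type to its queue context: OUTPUT types
 * select the source queue (out_q_ctx), everything else (CAPTURE) selects
 * the destination queue (cap_q_ctx).
 */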
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

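/*
 * v4l2_m2m_get_vq() - return the vb2_queue backing the queue context that
 * matches @type, or NULL if no queue context can be found.
 */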
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

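/*
 * v4l2_m2m_next_buf() - peek at the first (oldest) buffer on the ready
 * queue without removing it; returns NULL if the ready queue is empty.
 */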
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

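/*
 * v4l2_m2m_last_buf() - like v4l2_m2m_next_buf(), but peek at the last
 * (most recently queued) ready buffer instead of the first one.
 */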
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

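/*
 * v4l2_m2m_buf_remove() - take the first ready buffer off the ready queue
 * and return it; returns NULL if the ready queue is empty.
 *
 * Typical driver usage of the ready-queue helpers (a sketch; the foo_*
 * names and the ctx layout are illustrative, only the v4l2_m2m_* helpers
 * belong to this framework):
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		// program the hardware with src/dst here
 *	}
 *
 *	// in the completion interrupt handler:
 *	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_job_finish(foo_dev->m2m_dev, ctx->fh.m2m_ctx);
 */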
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;