linux-media.vger.kernel.org archive mirror
From: Philipp Zabel <p.zabel@pengutronix.de>
To: linux-media@vger.kernel.org
Cc: Steve Longerbeam <steve_longerbeam@mentor.com>,
	Sascha Hauer <s.hauer@pengutronix.de>,
	Michael Olbrich <m.olbrich@pengutronix.de>,
	Philipp Zabel <p.zabel@pengutronix.de>
Subject: [RFC PATCH 09/26] [media] imx-ipu: Add i.MX IPUv3 scaler driver
Date: Thu, 12 Jun 2014 19:06:23 +0200	[thread overview]
Message-ID: <1402592800-2925-10-git-send-email-p.zabel@pengutronix.de> (raw)
In-Reply-To: <1402592800-2925-1-git-send-email-p.zabel@pengutronix.de>

From: Sascha Hauer <s.hauer@pengutronix.de>

This patch adds support for hardware-accelerated scaling and color
space conversion between memory buffers using the IPUv3 IC (Image
Converter). Since the maximum output size of the IC is 1024x1024
pixels, larger frames are scaled and converted using multiple IC tasks
with overlapping tiles.
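
As a rough illustration of the tile count (the actual per-tile setup and
overlap handling is done by ipu_image_convert(); the helper and macro
names below are made up for this description only):

    /* DIV_ROUND_UP() comes from <linux/kernel.h> */
    #define MAX_IC_TILE_DIM	1024	/* IC output limit per dimension */

    /* e.g. a 1920x1080 frame needs roughly 2 x 2 = 4 overlapping tiles */
    static unsigned int num_ic_tasks(unsigned int width, unsigned int height)
    {
    	return DIV_ROUND_UP(width, MAX_IC_TILE_DIM) *
    	       DIV_ROUND_UP(height, MAX_IC_TILE_DIM);
    }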

The IC operates with a burst size of at least 8 pixels. Depending on the
scaling factor, up to 7 junk pixels may be written past the end of the
frame, so sizeimage is increased accordingly to leave room for them.
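
Concretely, the try_fmt/s_fmt handlers in this patch reserve the worst
case of 7 extra pixels at 4 bytes per pixel (RGBA):

    /*
     * Leave enough output space for worst-case overhead caused by the
     * 8 pixel burst size: 7 junk pixels of 4 bytes each.
     */
    f->fmt.pix.sizeimage += 7 * 4;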

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Michael Olbrich <m.olbrich@pengutronix.de>
Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
---
 drivers/media/platform/imx/Kconfig          |   9 +
 drivers/media/platform/imx/Makefile         |   1 +
 drivers/media/platform/imx/imx-ipu-scaler.c | 825 ++++++++++++++++++++++++++++
 drivers/media/platform/imx/imx-ipu.c        |   2 +-
 drivers/media/platform/imx/imx-ipu.h        |   1 +
 5 files changed, 837 insertions(+), 1 deletion(-)
 create mode 100644 drivers/media/platform/imx/imx-ipu-scaler.c

diff --git a/drivers/media/platform/imx/Kconfig b/drivers/media/platform/imx/Kconfig
index a90c973..4694367 100644
--- a/drivers/media/platform/imx/Kconfig
+++ b/drivers/media/platform/imx/Kconfig
@@ -1,2 +1,11 @@
 config VIDEO_IMX_IPU_COMMON
 	tristate
+
+config VIDEO_IMX_IPU_SCALER
+	tristate "i.MX5/6 IPUv3 based image scaler driver"
+	depends on VIDEO_DEV && IMX_IPUV3_CORE
+	select VIDEOBUF2_DMA_CONTIG
+	select VIDEO_IMX_IPU_COMMON
+	select V4L2_MEM2MEM_DEV
+	---help---
+	  This is a v4l2 scaler video driver for the IPUv3 on i.MX5/6.
diff --git a/drivers/media/platform/imx/Makefile b/drivers/media/platform/imx/Makefile
index 5de119c..f20aa0b 100644
--- a/drivers/media/platform/imx/Makefile
+++ b/drivers/media/platform/imx/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_VIDEO_IMX_IPU_COMMON)	+= imx-ipu.o
+obj-$(CONFIG_VIDEO_IMX_IPU_SCALER)	+= imx-ipu-scaler.o
diff --git a/drivers/media/platform/imx/imx-ipu-scaler.c b/drivers/media/platform/imx/imx-ipu-scaler.c
new file mode 100644
index 0000000..ced209a
--- /dev/null
+++ b/drivers/media/platform/imx/imx-ipu-scaler.c
@@ -0,0 +1,825 @@
+/*
+ * i.MX IPUv3 scaler driver
+ *
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix
+ *
+ * based on the mem2mem test driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <video/imx-ipu-v3.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "imx-ipu.h"
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 4096
+#define MAX_H 4096
+#define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE	(1 << 0)
+#define MEM2MEM_OUTPUT	(1 << 1)
+
+#define MEM2MEM_NAME		"imx-ipuv3-scale"
+
+/* Per queue */
+#define MEM2MEM_DEF_NUM_BUFS	VIDEO_MAX_FRAME
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT	(64 * 1024 * 1024)
+
+#define fh_to_ctx(__fh)	container_of(__fh, struct ipu_scale_ctx, fh)
+
+enum {
+	V4L2_M2M_SRC = 0,
+	V4L2_M2M_DST = 1,
+};
+
+struct ipu_scale_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	*vfd;
+	struct device		*dev;
+	struct ipu_soc		*ipu;
+
+	atomic_t		num_inst;
+	spinlock_t		irqlock;
+
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct mutex		dev_mutex;
+};
+
+/* Per-queue, driver-specific private data */
+struct ipu_scale_q_data {
+	struct v4l2_pix_format	cur_fmt;
+	struct v4l2_rect	rect;
+};
+
+struct ipu_scale_ctx {
+	struct ipu_scale_dev	*ipu_scaler;
+
+	struct v4l2_fh		fh;
+	struct vb2_alloc_ctx	*alloc_ctx;
+	struct ipu_scale_q_data	q_data[2];
+	struct work_struct	work;
+	struct completion	completion;
+	struct work_struct	skip_run;
+	int			error;
+	int			aborting;
+};
+
+static struct ipu_scale_q_data *get_q_data(struct ipu_scale_ctx *ctx,
+					   enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		return &ctx->q_data[V4L2_M2M_SRC];
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		return &ctx->q_data[V4L2_M2M_DST];
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+	struct ipu_scale_ctx *ctx = priv;
+
+	if (ctx->aborting)
+		return 0;
+
+	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < 1
+	    || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < 1) {
+		dev_dbg(ctx->ipu_scaler->dev, "Not enough buffers available\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+static void job_abort(void *priv)
+{
+	struct ipu_scale_ctx *ctx = priv;
+
+	ctx->aborting = 1;
+}
+
+static void ipu_complete(void *priv, int err)
+{
+	struct ipu_scale_dev *ipu_scaler = priv;
+	struct ipu_scale_ctx *curr_ctx;
+
+	curr_ctx = v4l2_m2m_get_curr_priv(ipu_scaler->m2m_dev);
+
+	if (NULL == curr_ctx) {
+		dev_dbg(ipu_scaler->dev,
+			"Instance released before the end of transaction\n");
+		return;
+	}
+
+	curr_ctx->error = err;
+	complete(&curr_ctx->completion);
+}
+
+static void device_run(void *priv)
+{
+	struct ipu_scale_ctx *ctx = priv;
+
+	schedule_work(&ctx->work);
+}
+
+static void ipu_scaler_work(struct work_struct *work)
+{
+	struct ipu_scale_ctx *ctx = container_of(work, struct ipu_scale_ctx,
+						 work);
+	struct ipu_scale_dev *ipu_scaler = ctx->ipu_scaler;
+	struct vb2_buffer *src_buf, *dst_buf;
+	struct ipu_scale_q_data *q_data;
+	struct v4l2_pix_format *pix;
+	struct ipu_image in, out;
+	int err = -ETIMEDOUT;
+	unsigned long flags;
+
+	/*
+	 * If streamoff dequeued all buffers before we could get the lock,
+	 * just bail out immediately.
+	 */
+	if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) ||
+		!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+		WARN_ON(1);
+		schedule_work(&ctx->skip_run);
+		return;
+	}
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	pix = &q_data->cur_fmt;
+
+	in.pix.width = pix->width;
+	in.pix.height = pix->height;
+	in.pix.bytesperline = pix->bytesperline;
+	in.pix.pixelformat = pix->pixelformat;
+	in.rect = q_data->rect;
+	in.phys = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+
+	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	pix = &q_data->cur_fmt;
+
+	out.pix.width = pix->width;
+	out.pix.height = pix->height;
+	out.pix.bytesperline = pix->bytesperline;
+	out.pix.pixelformat = pix->pixelformat;
+	out.rect = q_data->rect;
+	out.phys = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+
+	ipu_image_convert(ipu_scaler->ipu, &in, &out, ipu_complete, ipu_scaler);
+
+	if (!wait_for_completion_timeout(&ctx->completion,
+					 msecs_to_jiffies(300))) {
+		dev_err(ipu_scaler->dev,
+			"Timeout waiting for scaling result\n");
+		err = -ETIMEDOUT;
+	} else {
+		err = ctx->error;
+	}
+
+	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+	dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+	dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
+
+	spin_lock_irqsave(&ipu_scaler->irqlock, flags);
+	v4l2_m2m_buf_done(src_buf, err ? VB2_BUF_STATE_ERROR :
+					 VB2_BUF_STATE_DONE);
+	v4l2_m2m_buf_done(dst_buf, err ? VB2_BUF_STATE_ERROR :
+					 VB2_BUF_STATE_DONE);
+	spin_unlock_irqrestore(&ipu_scaler->irqlock, flags);
+
+	v4l2_m2m_job_finish(ipu_scaler->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+	strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+	strncpy(cap->bus_info, "platform:" MEM2MEM_NAME,
+		sizeof(cap->bus_info) - 1);
+	/*
+	 * This is only a mem-to-mem video device. The capture and output
+	 * device capability flags are left for backward compatibility and
+	 * are scheduled for removal.
+	 */
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+			   V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return ipu_enum_fmt(file, priv, f);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return ipu_enum_fmt(file, priv, f);
+}
+
+static int vidioc_g_fmt(struct ipu_scale_ctx *ctx, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct ipu_scale_q_data *q_data;
+	struct v4l2_pix_format *pix;
+
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+	pix = &q_data->cur_fmt;
+
+	return ipu_g_fmt(f, pix);
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	int ret;
+
+	ret = ipu_try_fmt(file, priv, f);
+
+	/*
+	 * Leave enough output space for worst-case overhead caused by 8 pixel
+	 * burst size: 7 RGBA pixels.
+	 */
+	f->fmt.pix.sizeimage += 7 * 4;
+
+	return ret;
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	f->fmt.pix.width &= ~0x7;
+	return ipu_try_fmt(file, priv, f);
+}
+
+static int vidioc_s_fmt(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct ipu_scale_q_data *q_data;
+	struct vb2_queue *vq;
+	struct ipu_scale_ctx *ctx = fh_to_ctx(priv);
+	int ret;
+
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+	if (!q_data)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->ipu_scaler->v4l2_dev, "%s queue busy\n",
+			 __func__);
+		return -EBUSY;
+	}
+
+	ret = ipu_s_fmt(file, priv, f, &q_data->cur_fmt);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Leave enough output space for worst-case overhead caused by 8 pixel
+	 * burst size: 7 RGBA pixels.
+	 */
+	f->fmt.pix.sizeimage += 7 * 4;
+	q_data->cur_fmt.sizeimage = f->fmt.pix.sizeimage;
+
+	/* Reset cropping/composing rectangle */
+	q_data->rect.left = 0;
+	q_data->rect.top = 0;
+	q_data->rect.width = q_data->cur_fmt.width;
+	q_data->rect.height = q_data->cur_fmt.height;
+
+	return 0;
+}
+
+static int vidioc_g_selection(struct file *file, void *priv,
+			      struct v4l2_selection *s)
+{
+	struct ipu_scale_ctx *ctx = fh_to_ctx(priv);
+	struct ipu_scale_q_data *q_data;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP:
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = q_data->cur_fmt.width;
+		s->r.height = q_data->cur_fmt.height;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = q_data->cur_fmt.width;
+		s->r.height = q_data->cur_fmt.height;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		s->r = q_data->rect;
+		break;
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+	case V4L2_SEL_TGT_COMPOSE_PADDED:
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = q_data->cur_fmt.width;
+		s->r.height = q_data->cur_fmt.height;
+		break;
+	}
+
+	return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *priv,
+			      struct v4l2_selection *s)
+{
+	struct ipu_scale_ctx *ctx = fh_to_ctx(priv);
+	struct ipu_scale_q_data *q_data;
+	struct vb2_queue *vq;
+
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, s->type);
+	if (!vq)
+		return -EINVAL;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP:
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* The input frame width to the IC must be a multiple of 8 pixels.
+	 * When resizing, the frame width must be a multiple of the burst
+	 * size - 8 or 16 pixels, as defined by the CB#_BURST_16 parameter.
+	 */
+	if (s->flags & V4L2_SEL_FLAG_GE)
+		s->r.width = round_up(s->r.width, 8);
+	if (s->flags & V4L2_SEL_FLAG_LE)
+		s->r.width = round_down(s->r.width, 8);
+	s->r.width = clamp_t(unsigned int, s->r.width, 8,
+			     round_down(q_data->cur_fmt.width, 8));
+	s->r.height = clamp_t(unsigned int, s->r.height, 1,
+			      q_data->cur_fmt.height);
+	s->r.left = clamp_t(unsigned int, s->r.left, 0,
+			    q_data->cur_fmt.width - s->r.width);
+	s->r.top = clamp_t(unsigned int, s->r.top, 0,
+			   q_data->cur_fmt.height - s->r.height);
+
+	/* V4L2_SEL_FLAG_KEEP_CONFIG is only valid for subdevices */
+	q_data->rect = s->r;
+
+	return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *fh,
+				  struct v4l2_frmsizeenum *fsize)
+{
+	return ipu_enum_framesizes(file, fh, fsize);
+}
+
+static const struct v4l2_ioctl_ops ipu_scale_ioctl_ops = {
+	.vidioc_querycap	= vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= vidioc_g_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap	= vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= vidioc_s_fmt,
+
+	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out	= vidioc_g_fmt_vid_out,
+	.vidioc_try_fmt_vid_out	= vidioc_try_fmt_vid_out,
+	.vidioc_s_fmt_vid_out	= vidioc_s_fmt,
+
+	.vidioc_g_selection	= vidioc_g_selection,
+	.vidioc_s_selection	= vidioc_s_selection,
+
+	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
+
+	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
+	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
+	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
+
+	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
+
+	.vidioc_enum_framesizes = vidioc_enum_framesizes,
+};
+
+static void ipu_scale_skip_run(struct work_struct *work)
+{
+	struct ipu_scale_ctx *ctx = container_of(work, struct ipu_scale_ctx,
+						 skip_run);
+
+	v4l2_m2m_job_finish(ctx->ipu_scaler->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+
+/*
+ * Queue operations
+ */
+
+static int ipu_scale_queue_setup(struct vb2_queue *vq,
+		const struct v4l2_format *fmt,
+		unsigned int *nbuffers,
+		unsigned int *nplanes, unsigned int sizes[],
+		void *alloc_ctxs[])
+{
+	struct ipu_scale_ctx *ctx = vb2_get_drv_priv(vq);
+	struct ipu_scale_q_data *q_data;
+	unsigned int size, count = *nbuffers;
+	struct v4l2_pix_format *pix;
+
+	q_data = get_q_data(ctx, vq->type);
+	pix = &q_data->cur_fmt;
+
+	size = pix->sizeimage;
+
+	while (size * count > MEM2MEM_VID_MEM_LIMIT)
+		(count)--;
+
+	*nplanes = 1;
+	*nbuffers = count;
+	sizes[0] = size;
+
+	ctx->alloc_ctx = vb2_dma_contig_init_ctx(ctx->ipu_scaler->dev);
+	if (IS_ERR(ctx->alloc_ctx))
+		return PTR_ERR(ctx->alloc_ctx);
+
+	alloc_ctxs[0] = ctx->alloc_ctx;
+
+	dev_dbg(ctx->ipu_scaler->dev, "get %d buffer(s) of size %d each.\n",
+		count, size);
+
+	return 0;
+}
+
+static int ipu_scale_buf_prepare(struct vb2_buffer *vb)
+{
+	struct ipu_scale_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct ipu_scale_q_data *q_data;
+	struct v4l2_pix_format *pix;
+
+	dev_dbg(ctx->ipu_scaler->dev, "type: %d\n", vb->vb2_queue->type);
+
+	q_data = get_q_data(ctx, vb->vb2_queue->type);
+	pix = &q_data->cur_fmt;
+
+	if (vb2_plane_size(vb, 0) < pix->sizeimage) {
+		dev_dbg(ctx->ipu_scaler->dev,
+				"%s data will not fit into plane (%lu < %lu)\n",
+				__func__, vb2_plane_size(vb, 0),
+				(long)pix->sizeimage);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, pix->sizeimage);
+
+	return 0;
+}
+
+static void ipu_scale_buf_queue(struct vb2_buffer *vb)
+{
+	struct ipu_scale_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+}
+
+static int ipu_scale_stop_streaming(struct vb2_queue *q)
+{
+	struct ipu_scale_ctx *ctx = vb2_get_drv_priv(q);
+	struct vb2_buffer *buf;
+
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+	} else {
+		while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+	}
+
+	return 0;
+}
+
+static struct vb2_ops ipu_scale_qops = {
+	.queue_setup	= ipu_scale_queue_setup,
+	.buf_prepare	= ipu_scale_buf_prepare,
+	.buf_queue	= ipu_scale_buf_queue,
+	.wait_prepare	= vb2_ops_wait_prepare,
+	.wait_finish	= vb2_ops_wait_finish,
+	.stop_streaming = ipu_scale_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct ipu_scale_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &ipu_scale_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->ipu_scaler->dev_mutex;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &ipu_scale_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->ipu_scaler->dev_mutex;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int ipu_scale_open(struct file *file)
+{
+	struct ipu_scale_dev *ipu_scaler = video_drvdata(file);
+	struct ipu_scale_ctx *ctx = NULL;
+	const int width = 720;
+	const int height = 576;
+	int i;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	INIT_WORK(&ctx->skip_run, ipu_scale_skip_run);
+	INIT_WORK(&ctx->work, ipu_scaler_work);
+	init_completion(&ctx->completion);
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+	ctx->ipu_scaler = ipu_scaler;
+
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(ipu_scaler->m2m_dev, ctx,
+					    &queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		int ret = PTR_ERR(ctx->fh.m2m_ctx);
+
+		kfree(ctx);
+		return ret;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ctx->q_data[i].cur_fmt.width = width;
+		ctx->q_data[i].cur_fmt.height = height;
+		ctx->q_data[i].cur_fmt.bytesperline = width;
+		ctx->q_data[i].cur_fmt.pixelformat = V4L2_PIX_FMT_YUV420;
+		ctx->q_data[i].cur_fmt.sizeimage = width * height * 3 / 2;
+		ctx->q_data[i].cur_fmt.colorspace = V4L2_COLORSPACE_REC709;
+		ctx->q_data[i].rect.left = 0;
+		ctx->q_data[i].rect.top = 0;
+		ctx->q_data[i].rect.width = width;
+		ctx->q_data[i].rect.height = height;
+	}
+
+	atomic_inc(&ipu_scaler->num_inst);
+
+	dev_dbg(ipu_scaler->dev, "Created instance %p, m2m_ctx: %p\n",
+			ctx, ctx->fh.m2m_ctx);
+
+	return 0;
+}
+
+static int ipu_scale_release(struct file *file)
+{
+	struct ipu_scale_dev *ipu_scaler = video_drvdata(file);
+	struct ipu_scale_ctx *ctx = fh_to_ctx(file->private_data);
+
+	dev_dbg(ipu_scaler->dev, "Releasing instance %p\n", ctx);
+
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+
+	atomic_dec(&ipu_scaler->num_inst);
+
+	return 0;
+}
+
+static const struct v4l2_file_operations ipu_scale_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ipu_scale_open,
+	.release	= ipu_scale_release,
+	.poll		= v4l2_m2m_fop_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= v4l2_m2m_fop_mmap,
+};
+
+static struct video_device ipu_scale_videodev = {
+	.name		= MEM2MEM_NAME,
+	.fops		= &ipu_scale_fops,
+	.ioctl_ops	= &ipu_scale_ioctl_ops,
+	.minor		= -1,
+	.release	= video_device_release,
+	.vfl_dir	= VFL_DIR_M2M,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= device_run,
+	.job_ready	= job_ready,
+	.job_abort	= job_abort,
+};
+
+static u64 vout_dmamask = ~(u32)0;
+
+static int ipu_scale_probe(struct platform_device *pdev)
+{
+	struct ipu_scale_dev *ipu_scaler;
+	struct video_device *vfd;
+	struct ipu_soc *ipu = dev_get_drvdata(pdev->dev.parent);
+	int ret;
+
+	pdev->dev.dma_mask = &vout_dmamask;
+	pdev->dev.coherent_dma_mask = 0xffffffff;
+
+	ipu_scaler = devm_kzalloc(&pdev->dev, sizeof(*ipu_scaler), GFP_KERNEL);
+	if (!ipu_scaler)
+		return -ENOMEM;
+
+	ipu_scaler->ipu = ipu;
+	ipu_scaler->dev = &pdev->dev;
+
+	spin_lock_init(&ipu_scaler->irqlock);
+	mutex_init(&ipu_scaler->dev_mutex);
+
+	ret = v4l2_device_register(&pdev->dev, &ipu_scaler->v4l2_dev);
+	if (ret)
+		return ret;
+
+	atomic_set(&ipu_scaler->num_inst, 0);
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		dev_err(ipu_scaler->dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto unreg_dev;
+	}
+
+	*vfd = ipu_scale_videodev;
+	vfd->lock = &ipu_scaler->dev_mutex;
+	vfd->v4l2_dev = &ipu_scaler->v4l2_dev;
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		dev_err(ipu_scaler->dev, "Failed to register video device\n");
+		goto rel_vdev;
+	}
+
+	video_set_drvdata(vfd, ipu_scaler);
+	snprintf(vfd->name, sizeof(vfd->name), "%s", ipu_scale_videodev.name);
+	ipu_scaler->vfd = vfd;
+	dev_dbg(ipu_scaler->dev, "Device registered as /dev/video%d\n",
+		vfd->num);
+
+	platform_set_drvdata(pdev, ipu_scaler);
+
+	ipu_scaler->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(ipu_scaler->m2m_dev)) {
+		dev_err(ipu_scaler->dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(ipu_scaler->m2m_dev);
+		goto err_m2m;
+	}
+
+	return 0;
+
+	v4l2_m2m_release(ipu_scaler->m2m_dev);
+err_m2m:
+	video_unregister_device(ipu_scaler->vfd);
+rel_vdev:
+	video_device_release(vfd);
+unreg_dev:
+	v4l2_device_unregister(&ipu_scaler->v4l2_dev);
+
+	return ret;
+}
+
+static int ipu_scale_remove(struct platform_device *pdev)
+{
+	struct ipu_scale_dev *ipu_scaler =
+		(struct ipu_scale_dev *)platform_get_drvdata(pdev);
+
+	v4l2_m2m_release(ipu_scaler->m2m_dev);
+	video_unregister_device(ipu_scaler->vfd);
+	v4l2_device_unregister(&ipu_scaler->v4l2_dev);
+	kfree(ipu_scaler);
+
+	return 0;
+}
+
+static const struct platform_device_id ipu_scale_id[] = {
+	{ "imx-ipuv3-scaler" },
+	{}
+};
+MODULE_DEVICE_TABLE(platform, ipu_scale_id);
+
+static struct platform_driver ipu_scale_pdrv = {
+	.probe		= ipu_scale_probe,
+	.remove		= ipu_scale_remove,
+	.driver		= {
+		.name	= "imx-ipuv3-scaler",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static void __exit ipu_scale_exit(void)
+{
+	platform_driver_unregister(&ipu_scale_pdrv);
+}
+
+static int __init ipu_scale_init(void)
+{
+	return  platform_driver_register(&ipu_scale_pdrv);
+}
+
+module_init(ipu_scale_init);
+module_exit(ipu_scale_exit);
+
+MODULE_DESCRIPTION("i.MX IPUv3 mem2mem scaler driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/imx/imx-ipu.c b/drivers/media/platform/imx/imx-ipu.c
index c1b8637..402cc8b 100644
--- a/drivers/media/platform/imx/imx-ipu.c
+++ b/drivers/media/platform/imx/imx-ipu.c
@@ -102,7 +102,7 @@ struct ipu_fmt *ipu_find_fmt_rgb(unsigned int pixelformat)
 }
 EXPORT_SYMBOL_GPL(ipu_find_fmt_rgb);
 
-static struct ipu_fmt *ipu_find_fmt(unsigned long pixelformat)
+struct ipu_fmt *ipu_find_fmt(unsigned long pixelformat)
 {
 	struct ipu_fmt *fmt;
 
diff --git a/drivers/media/platform/imx/imx-ipu.h b/drivers/media/platform/imx/imx-ipu.h
index 51c0982..288ee79 100644
--- a/drivers/media/platform/imx/imx-ipu.h
+++ b/drivers/media/platform/imx/imx-ipu.h
@@ -16,6 +16,7 @@ int ipu_enum_fmt_yuv(struct file *file, void *fh,
 		struct v4l2_fmtdesc *f);
 struct ipu_fmt *ipu_find_fmt_rgb(unsigned int pixelformat);
 struct ipu_fmt *ipu_find_fmt_yuv(unsigned int pixelformat);
+struct ipu_fmt *ipu_find_fmt(unsigned long pixelformat);
 int ipu_try_fmt(struct file *file, void *fh,
 		struct v4l2_format *f);
 int ipu_try_fmt_rgb(struct file *file, void *fh,
-- 
2.0.0.rc2



Thread overview: 33+ messages
2014-06-12 17:06 [RFC PATCH 00/26] i.MX5/6 IPUv3 CSI/IC Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 01/26] gpu: ipu-v3: Add IC support Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 02/26] gpu: ipu-v3: Register IC with IPUv3 Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 03/26] gpu: ipu-v3: Add function to setup CP channel as interlaced Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 04/26] gpu: ipu-v3: Add ipu_cpmem_get_buffer function Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 05/26] gpu: ipu-v3: Add support for partial interleaved YCbCr 4:2:0 (NV12) format Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 06/26] gpu: ipu-v3: Add support for planar YUV 4:2:2 (YUV422P) format Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 07/26] imx-drm: currently only IPUv3 is supported, make it mandatory Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 08/26] [media] imx-ipu: add ipu media common code Philipp Zabel
2014-06-12 17:06 ` Philipp Zabel [this message]
2014-06-12 17:06 ` [RFC PATCH 10/26] [media] imx-ipu: Add i.MX IPUv3 deinterlacer driver Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 11/26] [media] v4l2: subdev: Add v4l2_device_register_subdev_node function Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 12/26] [media] v4l2: Fix V4L2_CID_PIXEL_RATE Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 13/26] [media] v4l2 async: remove from notifier list Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 14/26] [media] Add i.MX SoC wide media device driver Philipp Zabel
2014-06-24 10:04   ` Dave Müller
2014-06-24 14:05   ` Dave Müller
2014-06-12 17:06 ` [RFC PATCH 15/26] [media] imx-ipu: Add i.MX IPUv3 capture driver Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 16/26] [media] ipuv3-csi: Skip 3 lines for NTSC BT.656 Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 17/26] [media] ipuv3-csi: Pass ipucsi to v4l2_media_subdev_s_power Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 18/26] [media] ipuv3-csi: make subdev controls available on video device Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 19/26] [media] imx-ipuv3-csi: Add support for temporarily stopping the stream on sync loss Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 20/26] [media] imx-ipuv3-csi: Export sync lock event to userspace Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 21/26] [media] v4l2-subdev.h: Add lock status notification Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 22/26] [media] v4l2-subdev: Export v4l2_subdev_fops Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 23/26] mfd: syscon: add child device support Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 24/26] [media] imx: Add video switch Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 25/26] ARM: dts: Add IPU aliases on i.MX6 Philipp Zabel
2014-06-12 17:06 ` [RFC PATCH 26/26] ARM: dts: imx6qdl: Add mipi_ipu1/2 multiplexers, mipi_csi, and their connections Philipp Zabel
2014-08-05  6:52 ` [RFC PATCH 00/26] i.MX5/6 IPUv3 CSI/IC Zahari Doychev
2015-10-27 13:10 ` Fabio Estevam
2015-12-01 12:08   ` Fabio Estevam
2015-12-14 15:07     ` Tim Harvey
