linux-media.vger.kernel.org archive mirror
From: "Niklas Söderlund" <niklas.soderlund+renesas@ragnatech.se>
To: hverkuil@xs4all.nl, linux-media@vger.kernel.org
Cc: "Niklas Söderlund" <niklas.soderlund+renesas@ragnatech.se>
Subject: [v4l-utils PATCHv2] libv4lconvert: Add support for V4L2_PIX_FMT_{NV16,NV61}
Date: Sat, 12 Mar 2016 01:45:05 +0100	[thread overview]
Message-ID: <1457743505-7161-1-git-send-email-niklas.soderlund+renesas@ragnatech.se> (raw)
In-Reply-To: <56E332A8.2080004@xs4all.nl>

NV16 and NV61 are two-plane (semi-planar) versions of the packed YUV
4:2:2 formats YUYV and YVYU. Support both formats by merging the two
planes into one and falling through to the V4L2_PIX_FMT_{YUYV,YVYU}
code path.

Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
---

I'm sorry this is a bit of a hack. Support for NV16 is scarce, and this
allowed me to use it in qv4l2, so I thought it might help someone else.
I'm not too sure about the entry in supported_src_pixfmts[]: is it
correct to set 'needs conversion' for my use case?
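
For anyone who wants to see the layout conversion in isolation, here is
a minimal standalone sketch. It is an illustration only, not part of the
patch; the 4x2 frame size and the sample byte values are made up for the
example.

/*
 * Illustration only (not part of the patch): interleave a tiny 4x2
 * NV16 frame into YUYV, the same byte merge the patch performs in
 * v4lconvert_nv16_to_yuyv(). Frame size and sample values are made
 * up for the example.
 */
#include <stdio.h>

#define W 4
#define H 2

int main(void)
{
	/*
	 * NV16: a full-resolution Y plane followed by an interleaved
	 * CbCr plane of the same size (4:2:2, one Cb/Cr pair per two
	 * horizontal pixels).
	 */
	const unsigned char nv16[W * H * 2] = {
		/* Y plane */
		0x10, 0x20, 0x30, 0x40,
		0x50, 0x60, 0x70, 0x80,
		/* CbCr plane */
		0xa0, 0xb0, 0xa1, 0xb1,
		0xa2, 0xb2, 0xa3, 0xb3,
	};
	unsigned char yuyv[W * H * 2];
	const unsigned char *y = nv16;
	const unsigned char *cbcr = nv16 + W * H;
	unsigned char *dst = yuyv;
	int i;

	/* One luma byte, then one chroma byte: Y0 Cb0 Y1 Cr0 ... */
	for (i = 0; i < W * H; i++) {
		*dst++ = *y++;
		*dst++ = *cbcr++;
	}

	/* Prints: 10 a0 20 b0 30 a1 40 b1 50 a2 60 b2 70 a3 80 b3 */
	for (i = 0; i < W * H * 2; i++)
		printf("%02x ", yuyv[i]);
	printf("\n");

	return 0;
}

For NV61 the chroma plane stores Cr before Cb, so the same interleave
yields YVYU ordering, which is why the second case in the patch sets
src_pix_fmt to V4L2_PIX_FMT_YVYU.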

Changes since v1
- Add NV61 support
- Fixed s/YUVU/YUYV/g in commit message
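
In case it helps review, here is a rough sketch of how the new helper
could be exercised on its own, using only the prototype this patch adds
to libv4lconvert-priv.h. The convert_one_frame() wrapper is
hypothetical; the buffer sizes follow the patch's assumption of
width*height luma bytes plus width*height chroma bytes in, and
width*height*2 packed bytes out.

#include <stdlib.h>

/* Prototype as added to lib/libv4lconvert/libv4lconvert-priv.h. */
void v4lconvert_nv16_to_yuyv(const unsigned char *src, unsigned char *dest,
		int width, int height);

/*
 * Hypothetical wrapper: convert one NV16 (or NV61) frame to packed
 * YUYV (or YVYU) and hand ownership of the new buffer to the caller.
 * Returns 0 on success, -1 on allocation failure.
 */
int convert_one_frame(const unsigned char *nv16, int width, int height,
		unsigned char **out)
{
	unsigned char *packed = malloc((size_t)width * height * 2);

	if (!packed)
		return -1;

	/* For NV61 input the identical call produces YVYU ordering. */
	v4lconvert_nv16_to_yuyv(nv16, packed, width, height);
	*out = packed;
	return 0;
}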


 lib/libv4lconvert/libv4lconvert-priv.h |  3 +++
 lib/libv4lconvert/libv4lconvert.c      | 31 +++++++++++++++++++++++++++++++
 lib/libv4lconvert/rgbyuv.c             | 15 +++++++++++++++
 3 files changed, 49 insertions(+)

diff --git a/lib/libv4lconvert/libv4lconvert-priv.h b/lib/libv4lconvert/libv4lconvert-priv.h
index b77e3d3..1740efc 100644
--- a/lib/libv4lconvert/libv4lconvert-priv.h
+++ b/lib/libv4lconvert/libv4lconvert-priv.h
@@ -129,6 +129,9 @@ void v4lconvert_yuyv_to_bgr24(const unsigned char *src, unsigned char *dst,
 void v4lconvert_yuyv_to_yuv420(const unsigned char *src, unsigned char *dst,
 		int width, int height, int stride, int yvu);

+void v4lconvert_nv16_to_yuyv(const unsigned char *src, unsigned char *dest,
+		int width, int height);
+
 void v4lconvert_yvyu_to_rgb24(const unsigned char *src, unsigned char *dst,
 		int width, int height, int stride);

diff --git a/lib/libv4lconvert/libv4lconvert.c b/lib/libv4lconvert/libv4lconvert.c
index f62aea1..d3d8936 100644
--- a/lib/libv4lconvert/libv4lconvert.c
+++ b/lib/libv4lconvert/libv4lconvert.c
@@ -98,6 +98,8 @@ static const struct v4lconvert_pixfmt supported_src_pixfmts[] = {
 	{ V4L2_PIX_FMT_YUYV,		16,	 5,	 4,	0 },
 	{ V4L2_PIX_FMT_YVYU,		16,	 5,	 4,	0 },
 	{ V4L2_PIX_FMT_UYVY,		16,	 5,	 4,	0 },
+	{ V4L2_PIX_FMT_NV16,		16,	 5,	 4,	1 },
+	{ V4L2_PIX_FMT_NV61,		16,	 5,	 4,	1 },
 	/* yuv 4:2:0 formats */
 	{ V4L2_PIX_FMT_SPCA501,		12,      6,	 3,	1 },
 	{ V4L2_PIX_FMT_SPCA505,		12,	 6,	 3,	1 },
@@ -1229,6 +1231,20 @@ static int v4lconvert_convert_pixfmt(struct v4lconvert_data *data,
 		}
 		break;

+	case V4L2_PIX_FMT_NV16: {
+		unsigned char *tmpbuf;
+
+		tmpbuf = v4lconvert_alloc_buffer(width * height * 2,
+				&data->convert_pixfmt_buf, &data->convert_pixfmt_buf_size);
+		if (!tmpbuf)
+			return v4lconvert_oom_error(data);
+
+		v4lconvert_nv16_to_yuyv(src, tmpbuf, width, height);
+		src_pix_fmt = V4L2_PIX_FMT_YUYV;
+		src = tmpbuf;
+		bytesperline = bytesperline * 2;
+		/* fall through */
+	}
 	case V4L2_PIX_FMT_YUYV:
 		if (src_size < (width * height * 2)) {
 			V4LCONVERT_ERR("short yuyv data frame\n");
@@ -1251,6 +1267,21 @@ static int v4lconvert_convert_pixfmt(struct v4lconvert_data *data,
 		}
 		break;

+	case V4L2_PIX_FMT_NV61: {
+		unsigned char *tmpbuf;
+
+		tmpbuf = v4lconvert_alloc_buffer(width * height * 2,
+				&data->convert_pixfmt_buf, &data->convert_pixfmt_buf_size);
+		if (!tmpbuf)
+			return v4lconvert_oom_error(data);
+
+		/* Note NV61 is NV16 with U and V swapped so this becomes yvyu. */
+		v4lconvert_nv16_to_yuyv(src, tmpbuf, width, height);
+		src_pix_fmt = V4L2_PIX_FMT_YVYU;
+		src = tmpbuf;
+		bytesperline = bytesperline * 2;
+		/* fall through */
+	}
 	case V4L2_PIX_FMT_YVYU:
 		if (src_size < (width * height * 2)) {
 			V4LCONVERT_ERR("short yvyu data frame\n");
diff --git a/lib/libv4lconvert/rgbyuv.c b/lib/libv4lconvert/rgbyuv.c
index 695255a..a0f8256 100644
--- a/lib/libv4lconvert/rgbyuv.c
+++ b/lib/libv4lconvert/rgbyuv.c
@@ -295,6 +295,21 @@ void v4lconvert_yuyv_to_yuv420(const unsigned char *src, unsigned char *dest,
 	}
 }

+void v4lconvert_nv16_to_yuyv(const unsigned char *src, unsigned char *dest,
+		int width, int height)
+{
+	const unsigned char *y, *cbcr;
+	int count = 0;
+
+	y = src;
+	cbcr = src + width*height;
+
+	while (count++ < width*height) {
+		*dest++ = *y++;
+		*dest++ = *cbcr++;
+	}
+}
+
 void v4lconvert_yvyu_to_bgr24(const unsigned char *src, unsigned char *dest,
 		int width, int height, int stride)
 {
--
2.7.2


Thread overview: 4+ messages
2016-03-11 20:40 [v4l-utils PATCH] libv4lconvert: Add support for V4L2_PIX_FMT_NV16 Niklas Söderlund
2016-03-11 21:03 ` Hans Verkuil
2016-03-12  0:45   ` Niklas Söderlund [this message]
2016-03-12 10:50     ` [v4l-utils PATCHv2] libv4lconvert: Add support for V4L2_PIX_FMT_{NV16,NV61} Hans de Goede
