linux-mm.kvack.org archive mirror
From: wangtao <tao.wangtao@honor.com>
To: <sumit.semwal@linaro.org>, <christian.koenig@amd.com>,
	<kraxel@redhat.com>, <vivek.kasireddy@intel.com>,
	<viro@zeniv.linux.org.uk>, <brauner@kernel.org>,
	<hughd@google.com>, <akpm@linux-foundation.org>,
	<amir73il@gmail.com>
Cc: <benjamin.gaignard@collabora.com>, <Brian.Starkey@arm.com>,
	<jstultz@google.com>, <tjmercier@google.com>, <jack@suse.cz>,
	<baolin.wang@linux.alibaba.com>, <linux-media@vger.kernel.org>,
	<dri-devel@lists.freedesktop.org>,
	<linaro-mm-sig@lists.linaro.org>, <linux-kernel@vger.kernel.org>,
	<linux-fsdevel@vger.kernel.org>, <linux-mm@kvack.org>,
	<bintian.wang@honor.com>, <yipengxiang@honor.com>,
	<liulu.liu@honor.com>, <feng.han@honor.com>,
	wangtao <tao.wangtao@honor.com>
Subject: [PATCH v4 3/4] udmabuf: Implement udmabuf direct I/O
Date: Tue, 3 Jun 2025 17:52:44 +0800
Message-ID: <20250603095245.17478-4-tao.wangtao@honor.com>
In-Reply-To: <20250603095245.17478-1-tao.wangtao@honor.com>

Construct a bio_vec array from the udmabuf's pinned folios, then call
the other file's read_iter/write_iter callbacks to perform the I/O.
Test data shows direct I/O copy_file_range improves throughput by
roughly 67% over direct I/O mmap & read (2557 MB/s vs. 1534 MB/s).

Test data:
|    32x32MB Read 1024MB  |Creat-ms|Close-ms|  I/O-ms|I/O-MB/s| I/O%
|-------------------------|--------|--------|--------|--------|-----
| 1)Beg udmabuf buffer R/W|    580 |    323 |   1238 |    867 | 100%
| 2)     dmabuf buffer R/W|     48 |      5 |   1149 |    934 | 107%
| 3) udma+memfd buffer R/W|    597 |    340 |   2157 |    497 |  57%
| 4) udma+memfd direct R/W|    573 |    340 |    700 |   1534 | 176%
| 5) u+mfd buffer sendfile|    577 |    340 |   1204 |    891 | 102%
| 6) u+mfd direct sendfile|    567 |    339 |   2272 |    472 |  54%
| 7)   u+mfd buffer splice|    570 |    337 |   1114 |    964 | 111%
| 8)   u+mfd direct splice|    564 |    335 |    793 |   1355 | 156%
| 9)  udmabuf buffer c_f_r|    577 |    323 |   1059 |   1014 | 116%
|10)  udmabuf direct c_f_r|    582 |    325 |    420 |   2557 | 294%
|11)End udmabuf buffer R/W|    586 |    323 |   1188 |    903 | 104%
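
For reference, a minimal userspace sketch of the row-10 path above
(udmabuf direct copy_file_range). This assumes the whole series is
applied; the file path, buffer size, and the minimal error handling
are illustrative only and not part of this patch:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/udmabuf.h>

	#define BUF_SIZE (1UL << 25)	/* 32 MB; illustrative */

	int main(void)
	{
		struct udmabuf_create create = { 0 };
		int memfd, devfd, dmabuf_fd, file_fd;
		ssize_t copied;

		/* udmabuf requires a shrink-sealed memfd as backing memory. */
		memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
		ftruncate(memfd, BUF_SIZE);
		fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

		devfd = open("/dev/udmabuf", O_RDWR);
		create.memfd = memfd;
		create.offset = 0;
		create.size = BUF_SIZE;
		dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);

		/*
		 * Source file opened with O_DIRECT; the dmabuf side is
		 * covered by the FMODE_CAN_ODIRECT set in this patch.
		 */
		file_fd = open("/data/testfile", O_RDONLY | O_DIRECT);

		/* Data lands in the pinned folios without a bounce buffer. */
		copied = copy_file_range(file_fd, NULL, dmabuf_fd, NULL,
					 BUF_SIZE, 0);
		if (copied < 0)
			perror("copy_file_range");
		else
			printf("copied %zd bytes\n", copied);

		close(file_fd);
		close(dmabuf_fd);
		close(devfd);
		close(memfd);
		return 0;
	}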

Signed-off-by: wangtao <tao.wangtao@honor.com>
---
 drivers/dma-buf/udmabuf.c | 54 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index e74e36a8ecda..511567b15340 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -15,6 +15,8 @@
 #include <linux/udmabuf.h>
 #include <linux/vmalloc.h>
 #include <linux/iosys-map.h>
+#include <linux/bvec.h>
+#include <linux/uio.h>
 
 static int list_limit = 1024;
 module_param(list_limit, int, 0644);
@@ -284,6 +286,55 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
 	return 0;
 }
 
+static ssize_t udmabuf_rw_file(struct dma_buf *dmabuf, loff_t my_pos,
+			struct file *other, loff_t pos,
+			size_t count, bool is_write)
+{
+	struct udmabuf *ubuf = dmabuf->priv;
+	loff_t my_end = my_pos + count, bv_beg, bv_end = 0;
+	size_t i, bv_off, bv_len, bv_idx = 0;
+	struct bio_vec *bvec;
+	struct kiocb kiocb;
+	struct iov_iter iter;
+	unsigned int direction = is_write ? ITER_SOURCE : ITER_DEST;
+	ssize_t ret = 0;
+	struct folio *folio;
+
+	bvec = kvcalloc(ubuf->nr_pinned, sizeof(*bvec), GFP_KERNEL);
+	if (!bvec)
+		return -ENOMEM;
+
+	init_sync_kiocb(&kiocb, other);
+	kiocb.ki_pos = pos;
+
+	for (i = 0; i < ubuf->nr_pinned; i++) {
+		folio = ubuf->pinned_folios[i];
+		bv_beg = bv_end;
+		if (bv_beg >= my_end)
+			break;
+		bv_end += folio_size(folio);
+		if (bv_end <= my_pos)
+			continue;
+
+		bv_len = min(bv_end, my_end) - max(my_pos, bv_beg);
+		bv_off = my_pos > bv_beg ? my_pos - bv_beg : 0;
+		bvec_set_page(&bvec[bv_idx], &folio->page, bv_len, bv_off);
+		++bv_idx;
+	}
+
+	if (bv_idx > 0) {
+		/* start R/W. */
+		iov_iter_bvec(&iter, direction, bvec, bv_idx, count);
+		if (is_write)
+			ret = other->f_op->write_iter(&kiocb, &iter);
+		else
+			ret = other->f_op->read_iter(&kiocb, &iter);
+	}
+	kvfree(bvec);
+
+	return ret;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
 	.cache_sgt_mapping = true,
 	.map_dma_buf	   = map_udmabuf,
@@ -294,6 +345,7 @@ static const struct dma_buf_ops udmabuf_ops = {
 	.vunmap		   = vunmap_udmabuf,
 	.begin_cpu_access  = begin_cpu_udmabuf,
 	.end_cpu_access    = end_cpu_udmabuf,
+	.rw_file = udmabuf_rw_file,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
@@ -455,6 +507,8 @@ static long udmabuf_create(struct miscdevice *device,
 		ret = PTR_ERR(dmabuf);
 		goto err;
 	}
+	/* Support direct I/O */
+	dmabuf->file->f_mode |= FMODE_CAN_ODIRECT;
 	/*
 	 * Ownership of ubuf is held by the dmabuf from here.
 	 * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
-- 
2.17.1



Thread overview: 30+ messages
2025-06-03  9:52 [PATCH v4 0/4] Implement dmabuf direct I/O via copy_file_range wangtao
2025-06-03  9:52 ` [PATCH v4 1/4] fs: allow cross-FS copy_file_range for memory file with direct I/O wangtao
2025-06-03 10:56   ` Amir Goldstein
2025-06-03 12:38     ` wangtao
2025-06-03 12:43       ` Amir Goldstein
2025-06-03  9:52 ` [PATCH v4 2/4] dmabuf: Implement copy_file_range callback for dmabuf direct I/O prep wangtao
2025-06-03 10:42   ` Christian König
2025-06-03 12:26     ` wangtao
2025-06-03 13:04   ` Christoph Hellwig
2025-06-03  9:52 ` wangtao [this message]
2025-06-03  9:52 ` [PATCH v4 4/4] dmabuf:system_heap Implement system_heap dmabuf direct I/O wangtao
2025-06-03 13:00 ` [PATCH v4 0/4] Implement dmabuf direct I/O via copy_file_range Christoph Hellwig
2025-06-03 13:14   ` Christian König
2025-06-03 13:19     ` Christoph Hellwig
2025-06-03 14:18       ` Christian König
2025-06-03 14:28         ` Christoph Hellwig
2025-06-03 15:55           ` Christian König
2025-06-03 16:01             ` Christoph Hellwig
2025-06-06  9:59               ` wangtao
2025-06-06  9:52       ` wangtao
2025-06-06 11:20         ` Christian König
2025-06-09  4:35           ` Christoph Hellwig
2025-06-09  9:32             ` wangtao
2025-06-10 10:52               ` Christian König
2025-06-10 13:37                 ` Christoph Hellwig
2025-06-13  9:43                   ` wangtao
2025-06-16  5:24                     ` Christoph Hellwig
2025-06-10 13:34               ` Christoph Hellwig
2025-06-13  9:33                 ` wangtao
2025-06-16  5:25                   ` Christoph Hellwig
