From: David Howells <dhowells@redhat.com>
To: Matthew Wilcox <willy@infradead.org>,
	"David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
	Al Viro <viro@zeniv.linux.org.uk>,
	Christoph Hellwig <hch@infradead.org>,
	Jens Axboe <axboe@kernel.dk>, Jeff Layton <jlayton@kernel.org>,
	Christian Brauner <brauner@kernel.org>,
	Chuck Lever III <chuck.lever@oracle.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	netdev@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Ilya Dryomov <idryomov@gmail.com>, Xiubo Li <xiubli@redhat.com>,
	ceph-devel@vger.kernel.org
Subject: [RFC PATCH v2 31/48] ceph: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage
Date: Wed, 29 Mar 2023 15:13:37 +0100	[thread overview]
Message-ID: <20230329141354.516864-32-dhowells@redhat.com> (raw)
In-Reply-To: <20230329141354.516864-1-dhowells@redhat.com>

Use sendmsg() with the MSG_SPLICE_PAGES flag rather than sendpage() in ceph
when transmitting data.  For the moment this can only transmit one page at a
time because of the architecture of net/ceph/, but if
write_partial_message_data() could be handed a whole bvec[] at a time by the
iteration code, pages could be sent in batches.
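
As a minimal standalone sketch of the pattern being switched to (for
illustration only, not code from this patch: the helper name is
hypothetical, and it folds in the MSG_DONTWAIT/MSG_NOSIGNAL flags and the
EAGAIN-to-0 squashing that the deleted ceph_tcp_sendpage() applied):

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int ceph_splice_one_page(struct socket *sock, struct page *page,
				size_t offset, size_t len, bool last)
{
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SPLICE_PAGES,
	};
	int ret;

	if (!last)
		msg.msg_flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

	/* Describe the page with a one-element bvec iterator; the page is
	 * referenced (spliced) into the socket rather than copied.
	 */
	bvec_set_page(&bvec, page, len, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);

	ret = sock_sendmsg(sock, &msg);
	if (ret == -EAGAIN)
		ret = 0;	/* match the old helper: EAGAIN means nothing sent */
	return ret;
}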

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Xiubo Li <xiubli@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: ceph-devel@vger.kernel.org
cc: netdev@vger.kernel.org
---
 net/ceph/messenger_v1.c | 58 ++++++++++++++---------------------------
 1 file changed, 19 insertions(+), 39 deletions(-)

diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index d664cb1593a7..b2d801a49122 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -74,37 +74,6 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
 	return r;
 }
 
-/*
- * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST
- */
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
-			     int offset, size_t size, int more)
-{
-	ssize_t (*sendpage)(struct socket *sock, struct page *page,
-			    int offset, size_t size, int flags);
-	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
-	int ret;
-
-	/*
-	 * sendpage cannot properly handle pages with page_count == 0,
-	 * we need to fall back to sendmsg if that's the case.
-	 *
-	 * Same goes for slab pages: skb_can_coalesce() allows
-	 * coalescing neighboring slab objects into a single frag which
-	 * triggers one of hardened usercopy checks.
-	 */
-	if (sendpage_ok(page))
-		sendpage = sock->ops->sendpage;
-	else
-		sendpage = sock_no_sendpage;
-
-	ret = sendpage(sock, page, offset, size, flags);
-	if (ret == -EAGAIN)
-		ret = 0;
-
-	return ret;
-}
-
 static void con_out_kvec_reset(struct ceph_connection *con)
 {
 	BUG_ON(con->v1.out_skip);
@@ -464,7 +433,6 @@ static int write_partial_message_data(struct ceph_connection *con)
 	struct ceph_msg *msg = con->out_msg;
 	struct ceph_msg_data_cursor *cursor = &msg->cursor;
 	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
-	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
 	u32 crc;
 
 	dout("%s %p msg %p\n", __func__, con, msg);
@@ -482,6 +450,10 @@ static int write_partial_message_data(struct ceph_connection *con)
 	 */
 	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
 	while (cursor->total_resid) {
+		struct bio_vec bvec;
+		struct msghdr msghdr = {
+			.msg_flags = MSG_SPLICE_PAGES | MSG_SENDPAGE_NOTLAST,
+		};
 		struct page *page;
 		size_t page_offset;
 		size_t length;
@@ -494,9 +466,12 @@ static int write_partial_message_data(struct ceph_connection *con)
 
 		page = ceph_msg_data_next(cursor, &page_offset, &length);
 		if (length == cursor->total_resid)
-			more = MSG_MORE;
-		ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
-					more);
+			msghdr.msg_flags |= MSG_MORE;
+
+		bvec_set_page(&bvec, page, length, page_offset);
+		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, length);
+
+		ret = sock_sendmsg(con->sock, &msghdr);
 		if (ret <= 0) {
 			if (do_datacrc)
 				msg->footer.data_crc = cpu_to_le32(crc);
@@ -526,7 +501,10 @@ static int write_partial_message_data(struct ceph_connection *con)
  */
 static int write_partial_skip(struct ceph_connection *con)
 {
-	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
+	struct bio_vec bvec;
+	struct msghdr msghdr = {
+		.msg_flags = MSG_SPLICE_PAGES | MSG_SENDPAGE_NOTLAST | MSG_MORE,
+	};
 	int ret;
 
 	dout("%s %p %d left\n", __func__, con, con->v1.out_skip);
@@ -534,9 +512,11 @@ static int write_partial_skip(struct ceph_connection *con)
 		size_t size = min(con->v1.out_skip, (int)PAGE_SIZE);
 
 		if (size == con->v1.out_skip)
-			more = MSG_MORE;
-		ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size,
-					more);
+			msghdr.msg_flags &= ~MSG_SENDPAGE_NOTLAST;
+		bvec_set_page(&bvec, ZERO_PAGE(0), size, 0);
+		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
+
+		ret = sock_sendmsg(con->sock, &msghdr);
 		if (ret <= 0)
 			goto out;
 		con->v1.out_skip -= ret;
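
Note on the deleted sendpage_ok() fallback: with MSG_SPLICE_PAGES the
splice-vs-copy decision is expected to move into the protocol's sendmsg
path (see "tcp: Make sendmsg(MSG_SPLICE_PAGES) copy unspliceable data"
earlier in this series), so the caller no longer needs to select
sock_no_sendpage() itself.  For reference, the check sendpage_ok()
performs amounts to the following (a sketch paraphrasing
include/linux/net.h, not a new helper added by this patch):

/* A page may be spliced (referenced by the skb rather than copied) only
 * if it is not a slab page and has a non-zero refcount; otherwise the
 * data must be copied before transmission.
 */
static inline bool sendpage_ok_sketch(struct page *page)
{
	return !PageSlab(page) && page_count(page) >= 1;
}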


