From: David Howells <dhowells@redhat.com>
To: netdev@vger.kernel.org
Cc: "David Howells" <dhowells@redhat.com>,
"Alexander Duyck" <alexander.duyck@gmail.com>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Willem de Bruijn" <willemdebruijn.kernel@gmail.com>,
"David Ahern" <dsahern@kernel.org>,
"Matthew Wilcox" <willy@infradead.org>,
"Jens Axboe" <axboe@kernel.dk>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
"Philipp Reisner" <philipp.reisner@linbit.com>,
"Lars Ellenberg" <lars.ellenberg@linbit.com>,
"Christoph Böhmwalder" <christoph.boehmwalder@linbit.com>,
drbd-dev@lists.linbit.com, linux-block@vger.kernel.org
Subject: [PATCH net-next v2 14/17] drbd: Send an entire bio in a single sendmsg
Date: Sat, 17 Jun 2023 13:11:43 +0100
Message-ID: <20230617121146.716077-15-dhowells@redhat.com>
In-Reply-To: <20230617121146.716077-1-dhowells@redhat.com>
Since _drbd_send_page() is now using sendmsg to send the pages rather than
sendpage, pass the entire bio in one go using a bvec iterator instead of
doing it piecemeal.  This also allows _drbd_no_send_page() and
_drbd_send_page() to be merged into a single _drbd_send_pages() helper.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Philipp Reisner <philipp.reisner@linbit.com>
cc: Lars Ellenberg <lars.ellenberg@linbit.com>
cc: "Christoph Böhmwalder" <christoph.boehmwalder@linbit.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: drbd-dev@lists.linbit.com
cc: linux-block@vger.kernel.org
cc: netdev@vger.kernel.org
---
Notes:
ver #2)
- Use "unsigned int" rather than "unsigned".
drivers/block/drbd/drbd_main.c | 77 +++++++++++-----------------------
1 file changed, 25 insertions(+), 52 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8a01a18a2550..beba74ae093b 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1520,28 +1520,15 @@ static void drbd_update_congested(struct drbd_connection *connection)
* As a workaround, we disable sendpage on pages
* with page_count == 0 or PageSlab.
*/
-static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
- int offset, size_t size, unsigned msg_flags)
-{
- struct socket *socket;
- void *addr;
- int err;
-
- socket = peer_device->connection->data.socket;
- addr = kmap(page) + offset;
- err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
- kunmap(page);
- if (!err)
- peer_device->device->send_cnt += size >> 9;
- return err;
-}
-
-static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
- int offset, size_t size, unsigned msg_flags)
+static int _drbd_send_pages(struct drbd_peer_device *peer_device,
+ struct iov_iter *iter, unsigned int msg_flags)
{
struct socket *socket = peer_device->connection->data.socket;
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = msg_flags, };
+ struct msghdr msg = {
+ .msg_flags = msg_flags | MSG_NOSIGNAL,
+ .msg_iter = *iter,
+ };
+ size_t size = iov_iter_count(iter);
int err = -EIO;
/* e.g. XFS meta- & log-data is in slab pages, which have a
@@ -1550,11 +1537,8 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
- if (!drbd_disable_sendpage && sendpage_ok(page))
- msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES;
-
- bvec_set_page(&bvec, page, offset, size);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+ if (drbd_disable_sendpage)
+ msg.msg_flags &= ~(MSG_NOSIGNAL | MSG_SPLICE_PAGES);
drbd_update_congested(peer_device->connection);
do {
@@ -1587,39 +1571,22 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
- struct bio_vec bvec;
- struct bvec_iter iter;
+ struct iov_iter iter;
- /* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, iter) {
- int err;
+ iov_iter_bvec(&iter, ITER_SOURCE, bio->bi_io_vec, bio->bi_vcnt,
+ bio->bi_iter.bi_size);
- err = _drbd_no_send_page(peer_device, bvec.bv_page,
- bvec.bv_offset, bvec.bv_len,
- bio_iter_last(bvec, iter)
- ? 0 : MSG_MORE);
- if (err)
- return err;
- }
- return 0;
+ return _drbd_send_pages(peer_device, &iter, 0);
}
static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
- struct bio_vec bvec;
- struct bvec_iter iter;
+ struct iov_iter iter;
- /* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, iter) {
- int err;
+ iov_iter_bvec(&iter, ITER_SOURCE, bio->bi_io_vec, bio->bi_vcnt,
+ bio->bi_iter.bi_size);
- err = _drbd_send_page(peer_device, bvec.bv_page,
- bvec.bv_offset, bvec.bv_len,
- bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
- if (err)
- return err;
- }
- return 0;
+ return _drbd_send_pages(peer_device, &iter, MSG_SPLICE_PAGES);
}
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
@@ -1631,10 +1598,16 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
/* hint all but last page with MSG_MORE */
page_chain_for_each(page) {
+ struct iov_iter iter;
+ struct bio_vec bvec;
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(peer_device, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0);
+ bvec_set_page(&bvec, page, 0, l);
+ iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, l);
+
+ err = _drbd_send_pages(peer_device, &iter,
+ MSG_SPLICE_PAGES |
+ (page_chain_next(page) ? MSG_MORE : 0));
if (err)
return err;
len -= l;