From: Manish Mishra <manish.mishra@nutanix.com>
To: qemu-devel@nongnu.org
Cc: leobras@redhat.com, peterx@redhat.com, berrange@redhat.com,
Manish Mishra <manish.mishra@nutanix.com>
Subject: [PATCH] QIOChannelSocket: Flush zerocopy socket error queue on ENOBUF failure for sendmsg
Date: Fri, 21 Feb 2025 04:44:48 -0500 [thread overview]
Message-ID: <20250221094448.206845-1-manish.mishra@nutanix.com> (raw)
We allocate extra metadata SKBs in case of zerocopy send. This metadata memory
is accounted for in the OPTMEM limit. If there is any error with sending
zerocopy data or if zerocopy was skipped, these metadata SKBs are queued in the
socket error queue. This error queue is freed when userspace reads it.
Usually, if there are continuous failures, we merge the metadata into a single
SKB and free another one. However, if there is any out-of-order processing or
intermittent zerocopy failures, this error chain can grow significantly,
exhausting the OPTMEM limit. As a result, all new sendmsg requests fail to
allocate any new SKB, leading to an ENOBUFS error.
To work around this, if we encounter an ENOBUFS error with a zerocopy sendmsg,
we flush the error queue and retry once more.
Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
---
include/io/channel-socket.h | 1 +
io/channel-socket.c | 52 ++++++++++++++++++++++++++++++++-----
2 files changed, 46 insertions(+), 7 deletions(-)
diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
index ab15577d38..6cfc66eb5b 100644
--- a/include/io/channel-socket.h
+++ b/include/io/channel-socket.h
@@ -49,6 +49,7 @@ struct QIOChannelSocket {
socklen_t remoteAddrLen;
ssize_t zero_copy_queued;
ssize_t zero_copy_sent;
+ bool new_zero_copy_sent_success;
};
diff --git a/io/channel-socket.c b/io/channel-socket.c
index 608bcf066e..c7f576290f 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -37,6 +37,11 @@
#define SOCKET_MAX_FDS 16
+#ifdef QEMU_MSG_ZEROCOPY
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+ Error **errp);
+#endif
+
SocketAddress *
qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
Error **errp)
@@ -65,6 +70,7 @@ qio_channel_socket_new(void)
sioc->fd = -1;
sioc->zero_copy_queued = 0;
sioc->zero_copy_sent = 0;
+ sioc->new_zero_copy_sent_success = FALSE;
ioc = QIO_CHANNEL(sioc);
qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
@@ -566,6 +572,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
size_t fdsize = sizeof(int) * nfds;
struct cmsghdr *cmsg;
int sflags = 0;
+ bool zero_copy_flush_pending = TRUE;
memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
@@ -612,9 +619,21 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
goto retry;
case ENOBUFS:
if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
- error_setg_errno(errp, errno,
- "Process can't lock enough memory for using MSG_ZEROCOPY");
- return -1;
+ if (zero_copy_flush_pending) {
+ ret = qio_channel_socket_flush_internal(ioc, errp);
+ if (ret < 0) {
+ error_setg_errno(errp, errno,
+ "Zerocopy flush failed");
+ return -1;
+ }
+ zero_copy_flush_pending = FALSE;
+ goto retry;
+ } else {
+ error_setg_errno(errp, errno,
+ "Process can't lock enough memory for "
+ "using MSG_ZEROCOPY");
+ return -1;
+ }
}
break;
}
@@ -725,8 +744,8 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
#ifdef QEMU_MSG_ZEROCOPY
-static int qio_channel_socket_flush(QIOChannel *ioc,
- Error **errp)
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+ Error **errp)
{
QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
struct msghdr msg = {};
@@ -791,15 +810,34 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
/* No errors, count successfully finished sendmsg()*/
sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
- /* If any sendmsg() succeeded using zero copy, return 0 at the end */
+ /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
- ret = 0;
+ sioc->new_zero_copy_sent_success = TRUE;
}
}
return ret;
}
+static int qio_channel_socket_flush(QIOChannel *ioc,
+ Error **errp)
+{
+ QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
+ int ret;
+
+ ret = qio_channel_socket_flush_internal(ioc, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (sioc->new_zero_copy_sent_success) {
+ sioc->new_zero_copy_sent_success = FALSE;
+ ret = 0;
+ }
+
+ return ret;
+}
+
#endif /* QEMU_MSG_ZEROCOPY */
static int
--
2.43.0
next reply other threads:[~2025-02-21 9:47 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-21 9:44 Manish Mishra [this message]
2025-02-24 20:35 ` [PATCH] QIOChannelSocket: Flush zerocopy socket error queue on ENOBUF failure for sendmsg Peter Xu
2025-02-27 16:54 ` Manish
2025-02-25 9:07 ` Daniel P. Berrangé
2025-02-27 17:00 ` Manish
2025-02-27 17:56 ` Peter Xu
2025-02-27 18:58 ` Manish
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250221094448.206845-1-manish.mishra@nutanix.com \
--to=manish.mishra@nutanix.com \
--cc=berrange@redhat.com \
--cc=leobras@redhat.com \
--cc=peterx@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).