From: Fabiano Rosas <farosas@suse.de>
To: Manish Mishra <manish.mishra@nutanix.com>, qemu-devel@nongnu.org
Cc: berrange@redhat.com, peterx@redhat.com, leobras@redhat.com,
Manish Mishra <manish.mishra@nutanix.com>
Subject: Re: [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
Date: Mon, 14 Apr 2025 11:26:09 -0300
Message-ID: <871ptuhla6.fsf@suse.de>
In-Reply-To: <20250403082121.366851-1-manish.mishra@nutanix.com>

Manish Mishra <manish.mishra@nutanix.com> writes:
> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
> memory is accounted for in the OPTMEM limit. If there is any error while
> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
> queued in the socket error queue. This error queue is freed when userspace
> reads it.
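
Aside for anyone reading along: the "userspace reads it" part happens via
recvmsg() with MSG_ERRQUEUE. A minimal standalone sketch of such a drain
loop, assuming a connected Linux socket with SO_ZEROCOPY enabled (this is
illustrative, not QEMU code):

  #include <errno.h>
  #include <sys/socket.h>
  #include <linux/errqueue.h>

  /*
   * Drain pending zerocopy notifications; returns 0 once the queue is
   * empty, -1 on error. MSG_ERRQUEUE reads never block: an empty error
   * queue yields EAGAIN even on a blocking socket.
   */
  static int drain_zerocopy_errqueue(int fd)
  {
      struct sock_extended_err *serr;
      struct msghdr msg = {};
      struct cmsghdr *cm;
      char control[CMSG_SPACE(sizeof(*serr))];

      for (;;) {
          /* recvmsg() rewrites msg_controllen, so reset it every pass */
          msg.msg_control = control;
          msg.msg_controllen = sizeof(control);

          if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) {
              if (errno == EAGAIN) {
                  return 0;   /* queue drained, metadata SKBs freed */
              }
              if (errno == EINTR) {
                  continue;
              }
              return -1;
          }
          for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
              serr = (struct sock_extended_err *)CMSG_DATA(cm);
              /*
               * [serr->ee_info, serr->ee_data] is the range of completed
               * sendmsg() calls; unused here, see the arithmetic below.
               */
              (void)serr;
          }
      }
  }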
>
> Usually, if there are continuous failures, the kernel merges the metadata
> into a single SKB and frees the other, so the queue never exceeds the
> OPTMEM limit. However, with out-of-order processing or intermittent
> zerocopy failures, this error chain can grow significantly, exhausting the
> OPTMEM limit. As a result, all new sendmsg requests fail to allocate any
> new SKB, leading to an ENOBUFS error. Depending on the amount of data
> queued before the flush (e.g., large live migration iterations), even
> large OPTMEM limits are prone to failure.
>
> To work around this, if we encounter an ENOBUFS error from a zerocopy
> sendmsg, we flush the error queue and retry once more.
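
In sketch form, the retry being added looks like this (a hypothetical
fragment reusing the drain helper sketched above; the real implementation
is in the diff below):

  bool flushed_once = false;
  ssize_t ret;

 retry:
  ret = sendmsg(fd, &msg, MSG_ZEROCOPY);
  if (ret < 0 && errno == ENOBUFS && !flushed_once) {
      /*
       * OPTMEM may be exhausted by queued completion metadata:
       * drain the error queue and retry exactly once.
       */
      if (drain_zerocopy_errqueue(fd) < 0) {
          return -1;
      }
      flushed_once = true;
      goto retry;
  }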
>
> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
> ---
> include/io/channel-socket.h | 5 +++
> io/channel-socket.c | 74 ++++++++++++++++++++++++++++++-------
> 2 files changed, 65 insertions(+), 14 deletions(-)
>
> V2:
> 1. Removed the dirty_sync_missed_zero_copy migration stat.
> 2. Made the call to qio_channel_socket_flush_internal() from
> qio_channel_socket_writev() non-blocking.
>
> V3:
> 1. Add the dirty_sync_missed_zero_copy migration stat again.
>
> V4:
> 1. Minor nit: renamed s/zero_copy_flush_pending/zerocopy_flushed_once/.
>
> diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
> index ab15577d38..2c48b972e8 100644
> --- a/include/io/channel-socket.h
> +++ b/include/io/channel-socket.h
> @@ -49,6 +49,11 @@ struct QIOChannelSocket {
> socklen_t remoteAddrLen;
> ssize_t zero_copy_queued;
> ssize_t zero_copy_sent;
> + /**
> + * This flag indicates whether any new data was successfully sent with
> + * zerocopy since the last qio_channel_socket_flush() call.
> + */
> + bool new_zero_copy_sent_success;
> };
>
>
> diff --git a/io/channel-socket.c b/io/channel-socket.c
> index 608bcf066e..d5882c16fe 100644
> --- a/io/channel-socket.c
> +++ b/io/channel-socket.c
> @@ -37,6 +37,12 @@
>
> #define SOCKET_MAX_FDS 16
>
> +#ifdef QEMU_MSG_ZEROCOPY
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> + bool block,
> + Error **errp);
> +#endif
> +
> SocketAddress *
> qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
> Error **errp)
> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
> sioc->fd = -1;
> sioc->zero_copy_queued = 0;
> sioc->zero_copy_sent = 0;
> + sioc->new_zero_copy_sent_success = false;
>
> ioc = QIO_CHANNEL(sioc);
> qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> size_t fdsize = sizeof(int) * nfds;
> struct cmsghdr *cmsg;
> int sflags = 0;
> + bool zerocopy_flushed_once = false;
>
> memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>
> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> goto retry;
> case ENOBUFS:
> if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
> - error_setg_errno(errp, errno,
> - "Process can't lock enough memory for using MSG_ZEROCOPY");
> - return -1;
> + /**
> + * Socket error queueing may exhaust the OPTMEM limit. Try
> + * flushing the error queue once.
> + */
> + if (!zerocopy_flushed_once) {
> + ret = qio_channel_socket_flush_internal(ioc, false, errp);
I'm not following this closely, so I might have missed some discussion,
but let me point out that the previous version had a comment regarding
hardcoding 'false' here that I don't see addressed, nor any comment
explaining why it wasn't addressed.
> + if (ret < 0) {
> + error_setg_errno(errp, errno,
> + "Zerocopy flush failed");
> + return -1;
> + }
> + zerocopy_flushed_once = true;
> + goto retry;
> + } else {
> + error_setg_errno(errp, errno,
> + "Process can't lock enough memory for "
> + "using MSG_ZEROCOPY");
> + return -1;
> + }
> }
> break;
> }
> @@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>
>
> #ifdef QEMU_MSG_ZEROCOPY
> -static int qio_channel_socket_flush(QIOChannel *ioc,
> - Error **errp)
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> + bool block,
> + Error **errp)
> {
> QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> struct msghdr msg = {};
> @@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
> struct cmsghdr *cm;
> char control[CMSG_SPACE(sizeof(*serr))];
> int received;
> - int ret;
>
> if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
> return 0;
> @@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
> msg.msg_controllen = sizeof(control);
> memset(control, 0, sizeof(control));
>
> - ret = 1;
> -
> while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
> received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
> if (received < 0) {
> switch (errno) {
> case EAGAIN:
> - /* Nothing on errqueue, wait until something is available */
> - qio_channel_wait(ioc, G_IO_ERR);
> - continue;
> + if (block) {
> + /* Nothing on errqueue, wait until something is
> + * available.
> + */
> + qio_channel_wait(ioc, G_IO_ERR);
> + continue;
> + }
> + return 0;
> case EINTR:
> continue;
> default:
> @@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
> /* No errors, count successfully finished sendmsg()*/
> sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
>
> - /* If any sendmsg() succeeded using zero copy, return 0 at the end */
> + /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
> if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
> - ret = 0;
> + sioc->new_zero_copy_sent_success = true;
> }
> }
>
> - return ret;
> + return 0;
> +}
> +
> +static int qio_channel_socket_flush(QIOChannel *ioc,
> + Error **errp)
> +{
> + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> + int ret;
> +
> + ret = qio_channel_socket_flush_internal(ioc, true, errp);
> + if (ret < 0) {
> + return ret;
> + }
> +
> + if (sioc->new_zero_copy_sent_success) {
> + sioc->new_zero_copy_sent_success = FALSE;
> + return 0;
> + }
> +
> + return 1;
> }
>
> #endif /* QEMU_MSG_ZEROCOPY */
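
For anyone puzzled by the 'serr->ee_data - serr->ee_info + 1' increment
above: each MSG_ERRQUEUE notification covers an inclusive range of
completed sendmsg() calls. A toy example with made-up values:

  #include <stdint.h>
  #include <linux/errqueue.h>

  struct sock_extended_err serr = { .ee_info = 5, .ee_data = 7 };
  /* sends #5, #6 and #7 completed in one notification: 7 - 5 + 1 == 3 */
  uint32_t completed = serr.ee_data - serr.ee_info + 1;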