From: Manish Mishra <manish.mishra@nutanix.com>
To: qemu-devel@nongnu.org
Cc: leobras@redhat.com, berrange@redhat.com, farosas@suse.de,
	peterx@redhat.com, Manish Mishra <manish.mishra@nutanix.com>
Subject: [PATCH v2] QIOChannelSocket: Flush zerocopy socket error queue on ENOBUF failure for sendmsg
Date: Sun,  9 Mar 2025 21:15:00 -0400
Message-ID: <20250310011500.240782-1-manish.mishra@nutanix.com>

We allocate extra metadata SKBs in the case of a zerocopy send. This metadata
memory is accounted against the OPTMEM limit. If there is any error while
sending zerocopy packets, or if zerocopy is skipped, these metadata SKBs are
queued in the socket error queue and are freed only when userspace reads that
queue.
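
For background, here is a minimal sketch (illustrative only, not QEMU code)
of how userspace drains those completion notifications from the error queue;
this is essentially what qio_channel_socket_flush() does:

  /*
   * Illustrative only: drain MSG_ZEROCOPY completion notifications so the
   * kernel can free the metadata SKBs charged against the OPTMEM limit.
   */
  #include <errno.h>
  #include <sys/socket.h>
  #include <linux/errqueue.h>

  static void drain_errqueue(int fd)
  {
      char control[CMSG_SPACE(sizeof(struct sock_extended_err))];

      for (;;) {
          struct msghdr msg = {
              .msg_control = control,
              .msg_controllen = sizeof(control),
          };

          if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) {
              if (errno == EINTR) {
                  continue;
              }
              /* EAGAIN: the error queue is empty, nothing left to release */
              return;
          }

          struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
          struct sock_extended_err *serr;

          if (!cm) {
              return;
          }
          serr = (struct sock_extended_err *)CMSG_DATA(cm);
          /*
           * serr->ee_info..ee_data is the range of completed zerocopy
           * sends; SO_EE_CODE_ZEROCOPY_COPIED means the kernel had to
           * fall back to copying for that range.
           */
          (void)serr;
      }
  }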

Usually, if there are continuous failures, we merge the metadata into a single
SKB and free the other, so the error queue never exceeds the OPTMEM limit.
However, with out-of-order processing or intermittent zerocopy failures, this
error chain can grow significantly and exhaust the OPTMEM limit. As a result,
all new sendmsg() requests fail to allocate a new SKB and return ENOBUFS.
Depending on the amount of data queued before the flush (e.g., large live
migration iterations), even a large OPTMEM limit is prone to exhaustion.
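
(The limit involved is the net.core.optmem_max sysctl. Purely for reference,
and not part of this patch, a throwaway snippet to read it on a Linux host:)

  /* Illustrative only: print the per-socket optional-memory budget that
   * bounds how much unread zerocopy completion metadata can accumulate. */
  #include <stdio.h>

  int main(void)
  {
      FILE *f = fopen("/proc/sys/net/core/optmem_max", "r");
      long optmem;

      if (f && fscanf(f, "%ld", &optmem) == 1) {
          printf("net.core.optmem_max = %ld bytes\n", optmem);
      }
      if (f) {
          fclose(f);
      }
      return 0;
  }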

To work around this, if a zerocopy sendmsg() fails with ENOBUFS, we flush the
error queue and retry the send once more.
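
Schematically, the retry added in qio_channel_socket_writev() behaves like
the sketch below (simplified and outside QEMU; drain_errqueue() is the
illustrative helper from the earlier sketch, standing in for the
non-blocking qio_channel_socket_flush_internal() call in the diff):

  #include <errno.h>
  #include <stdbool.h>
  #include <sys/types.h>
  #include <sys/socket.h>   /* MSG_ZEROCOPY needs a recent kernel/libc */

  static void drain_errqueue(int fd);   /* from the earlier sketch */

  static ssize_t send_with_retry(int fd, struct msghdr *msg, bool zero_copy)
  {
      bool flush_pending = true;
      ssize_t ret;

   retry:
      ret = sendmsg(fd, msg, zero_copy ? MSG_ZEROCOPY : 0);
      if (ret < 0 && errno == ENOBUFS && zero_copy) {
          if (flush_pending) {
              /*
               * Reading the error queue releases the metadata SKBs and
               * their OPTMEM accounting, so one retry may now succeed.
               */
              drain_errqueue(fd);
              flush_pending = false;
              goto retry;
          }
          /* Still over the limit after a flush: report the failure. */
      }
      return ret;
  }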

Additionally, this patch removes the dirty_sync_missed_zero_copy migration
stat. This stat is not used anywhere and does not seem useful. Removing it
simplifies the patch.

V2:
  1. Removed the dirty_sync_missed_zero_copy migration stat.
  2. Made the call to qio_channel_socket_flush_internal() from
     qio_channel_socket_writev() non-blocking.

Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
---
 include/io/channel.h           |  3 +-
 io/channel-socket.c            | 57 ++++++++++++++++++++++++----------
 migration/migration-hmp-cmds.c |  5 ---
 migration/migration-stats.h    |  5 ---
 migration/migration.c          |  2 --
 migration/multifd.c            |  3 --
 6 files changed, 41 insertions(+), 34 deletions(-)

diff --git a/include/io/channel.h b/include/io/channel.h
index 62b657109c..c393764ab7 100644
--- a/include/io/channel.h
+++ b/include/io/channel.h
@@ -980,8 +980,7 @@ int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
  * If not implemented, acts as a no-op, and returns 0.
  *
  * Returns -1 if any error is found,
- *          1 if every send failed to use zero copy.
- *          0 otherwise.
+ *          0 on success.
  */
 
 int qio_channel_flush(QIOChannel *ioc,
diff --git a/io/channel-socket.c b/io/channel-socket.c
index 608bcf066e..0f2a5c2b5f 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -37,6 +37,12 @@
 
 #define SOCKET_MAX_FDS 16
 
+#ifdef QEMU_MSG_ZEROCOPY
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+                                             bool block,
+                                             Error **errp);
+#endif
+
 SocketAddress *
 qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
                                      Error **errp)
@@ -566,6 +572,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
     size_t fdsize = sizeof(int) * nfds;
     struct cmsghdr *cmsg;
     int sflags = 0;
+    bool zero_copy_flush_pending = true;
 
     memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
 
@@ -612,9 +619,21 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
             goto retry;
         case ENOBUFS:
             if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
-                error_setg_errno(errp, errno,
-                                 "Process can't lock enough memory for using MSG_ZEROCOPY");
-                return -1;
+                if (zero_copy_flush_pending) {
+                    ret = qio_channel_socket_flush_internal(ioc, false, errp);
+                    if (ret < 0) {
+                        /* errp was already set by the flush helper */
+                        error_prepend(errp, "Zerocopy flush failed: ");
+                        return -1;
+                    }
+                    zero_copy_flush_pending = false;
+                    goto retry;
+                } else {
+                    error_setg_errno(errp, errno,
+                                     "Process can't lock enough memory for "
+                                     "using MSG_ZEROCOPY");
+                    return -1;
+                }
             }
             break;
         }
@@ -725,8 +744,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
 
 
 #ifdef QEMU_MSG_ZEROCOPY
-static int qio_channel_socket_flush(QIOChannel *ioc,
-                                    Error **errp)
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+                                             bool block,
+                                             Error **errp)
 {
     QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
     struct msghdr msg = {};
@@ -734,7 +754,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
     struct cmsghdr *cm;
     char control[CMSG_SPACE(sizeof(*serr))];
     int received;
-    int ret;
 
     if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
         return 0;
@@ -744,16 +763,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
     msg.msg_controllen = sizeof(control);
     memset(control, 0, sizeof(control));
 
-    ret = 1;
-
     while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
         received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
         if (received < 0) {
             switch (errno) {
             case EAGAIN:
-                /* Nothing on errqueue, wait until something is available */
-                qio_channel_wait(ioc, G_IO_ERR);
-                continue;
+                if (block) {
+                    /* Nothing on errqueue, wait until something is
+                     * available.
+                     */
+                    qio_channel_wait(ioc, G_IO_ERR);
+                    continue;
+                }
+                return 0;
             case EINTR:
                 continue;
             default:
@@ -790,14 +812,15 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
 
         /* No errors, count successfully finished sendmsg()*/
         sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
-
-        /* If any sendmsg() succeeded using zero copy, return 0 at the end */
-        if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
-            ret = 0;
-        }
     }
 
-    return ret;
+    return 0;
+}
+
+static int qio_channel_socket_flush(QIOChannel *ioc,
+                                    Error **errp)
+{
+    return qio_channel_socket_flush_internal(ioc, true, errp);
 }
 
 #endif /* QEMU_MSG_ZEROCOPY */
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index 49c26daed3..4533ce91ae 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -138,11 +138,6 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
             monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n",
                            info->ram->postcopy_bytes >> 10);
         }
-        if (info->ram->dirty_sync_missed_zero_copy) {
-            monitor_printf(mon,
-                           "Zero-copy-send fallbacks happened: %" PRIu64 " times\n",
-                           info->ram->dirty_sync_missed_zero_copy);
-        }
     }
 
     if (info->xbzrle_cache) {
diff --git a/migration/migration-stats.h b/migration/migration-stats.h
index 05290ade76..80a3d4be93 100644
--- a/migration/migration-stats.h
+++ b/migration/migration-stats.h
@@ -50,11 +50,6 @@ typedef struct {
      * Number of times we have synchronized guest bitmaps.
      */
     Stat64 dirty_sync_count;
-    /*
-     * Number of times zero copy failed to send any page using zero
-     * copy.
-     */
-    Stat64 dirty_sync_missed_zero_copy;
     /*
      * Number of bytes sent at migration completion stage while the
      * guest is stopped.
diff --git a/migration/migration.c b/migration/migration.c
index 1833cfe358..2ab19ca858 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1231,8 +1231,6 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->mbps = s->mbps;
     info->ram->dirty_sync_count =
         stat64_get(&mig_stats.dirty_sync_count);
-    info->ram->dirty_sync_missed_zero_copy =
-        stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
     info->ram->postcopy_requests =
         stat64_get(&mig_stats.postcopy_requests);
     info->ram->page_size = page_size;
diff --git a/migration/multifd.c b/migration/multifd.c
index dfb5189f0e..ee6b2d3cba 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -607,9 +607,6 @@ static int multifd_zero_copy_flush(QIOChannel *c)
         error_report_err(err);
         return -1;
     }
-    if (ret == 1) {
-        stat64_add(&mig_stats.dirty_sync_missed_zero_copy, 1);
-    }
 
     return ret;
 }
-- 
2.43.0
