* [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
@ 2025-04-03 8:21 Manish Mishra
2025-04-14 14:26 ` Fabiano Rosas
2025-04-15 9:27 ` Daniel P. Berrangé
From: Manish Mishra @ 2025-04-03 8:21 UTC (permalink / raw)
To: qemu-devel; +Cc: berrange, peterx, leobras, farosas, Manish Mishra
We allocate extra metadata SKBs in case of a zerocopy send. This metadata
memory is accounted for in the OPTMEM limit. If there is any error while
sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
queued in the socket error queue. This error queue is freed when userspace
reads it.
Usually, if there are continuous failures, we merge the metadata into a single
SKB and free another one. As a result, it never exceeds the OPTMEM limit.
However, if there is any out-of-order processing or intermittent zerocopy
failures, this error chain can grow significantly, exhausting the OPTMEM limit.
As a result, all new sendmsg requests fail to allocate any new SKB, leading to
an ENOBUF error. Depending on the amount of data queued before the flush
(i.e., large live migration iterations), even large OPTMEM limits are prone to
failure.
To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
we flush the error queue and retry once more.
Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
---
include/io/channel-socket.h | 5 +++
io/channel-socket.c | 74 ++++++++++++++++++++++++++++++-------
2 files changed, 65 insertions(+), 14 deletions(-)
V2:
1. Removed the dirty_sync_missed_zero_copy migration stat.
2. Made the call to qio_channel_socket_flush_internal() from
qio_channel_socket_writev() non-blocking.
V3:
1. Add the dirty_sync_missed_zero_copy migration stat again.
V4:
1. Minor nit to rename s/zero_copy_flush_pending/zerocopy_flushed_once.
diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
index ab15577d38..2c48b972e8 100644
--- a/include/io/channel-socket.h
+++ b/include/io/channel-socket.h
@@ -49,6 +49,11 @@ struct QIOChannelSocket {
socklen_t remoteAddrLen;
ssize_t zero_copy_queued;
ssize_t zero_copy_sent;
+ /**
+ * This flag indicates whether any new data was successfully sent with
+ * zerocopy since the last qio_channel_socket_flush() call.
+ */
+ bool new_zero_copy_sent_success;
};
diff --git a/io/channel-socket.c b/io/channel-socket.c
index 608bcf066e..d5882c16fe 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -37,6 +37,12 @@
#define SOCKET_MAX_FDS 16
+#ifdef QEMU_MSG_ZEROCOPY
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+ bool block,
+ Error **errp);
+#endif
+
SocketAddress *
qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
Error **errp)
@@ -65,6 +71,7 @@ qio_channel_socket_new(void)
sioc->fd = -1;
sioc->zero_copy_queued = 0;
sioc->zero_copy_sent = 0;
+ sioc->new_zero_copy_sent_success = FALSE;
ioc = QIO_CHANNEL(sioc);
qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
@@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
size_t fdsize = sizeof(int) * nfds;
struct cmsghdr *cmsg;
int sflags = 0;
+ bool zerocopy_flushed_once = FALSE;
memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
@@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
goto retry;
case ENOBUFS:
if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
- error_setg_errno(errp, errno,
- "Process can't lock enough memory for using MSG_ZEROCOPY");
- return -1;
+ /**
+ * Socket error queueing may exhaust the OPTMEM limit. Try
+ * flushing the error queue once.
+ */
+ if (!zerocopy_flushed_once) {
+ ret = qio_channel_socket_flush_internal(ioc, false, errp);
+ if (ret < 0) {
+ error_setg_errno(errp, errno,
+ "Zerocopy flush failed");
+ return -1;
+ }
+ zerocopy_flushed_once = TRUE;
+ goto retry;
+ } else {
+ error_setg_errno(errp, errno,
+ "Process can't lock enough memory for "
+ "using MSG_ZEROCOPY");
+ return -1;
+ }
}
break;
}
@@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
#ifdef QEMU_MSG_ZEROCOPY
-static int qio_channel_socket_flush(QIOChannel *ioc,
- Error **errp)
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+ bool block,
+ Error **errp)
{
QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
struct msghdr msg = {};
@@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
struct cmsghdr *cm;
char control[CMSG_SPACE(sizeof(*serr))];
int received;
- int ret;
if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
return 0;
@@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
msg.msg_controllen = sizeof(control);
memset(control, 0, sizeof(control));
- ret = 1;
-
while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
if (received < 0) {
switch (errno) {
case EAGAIN:
- /* Nothing on errqueue, wait until something is available */
- qio_channel_wait(ioc, G_IO_ERR);
- continue;
+ if (block) {
+ /* Nothing on errqueue, wait until something is
+ * available.
+ */
+ qio_channel_wait(ioc, G_IO_ERR);
+ continue;
+ }
+ return 0;
case EINTR:
continue;
default:
@@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
/* No errors, count successfully finished sendmsg()*/
sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
- /* If any sendmsg() succeeded using zero copy, return 0 at the end */
+ /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
- ret = 0;
+ sioc->new_zero_copy_sent_success = TRUE;
}
}
- return ret;
+ return 0;
+}
+
+static int qio_channel_socket_flush(QIOChannel *ioc,
+ Error **errp)
+{
+ QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
+ int ret;
+
+ ret = qio_channel_socket_flush_internal(ioc, true, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (sioc->new_zero_copy_sent_success) {
+ sioc->new_zero_copy_sent_success = FALSE;
+ return 0;
+ }
+
+ return 1;
}
#endif /* QEMU_MSG_ZEROCOPY */
--
2.43.0
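
A minimal stand-alone sketch of the kernel mechanism the patch builds on:
MSG_ZEROCOPY completions arrive as struct sock_extended_err records on the
socket error queue, are read with recvmsg(MSG_ERRQUEUE), and their metadata
is charged against the OPTMEM limit (net.core.optmem_max). The helper name
below is hypothetical; QEMU's real logic is qio_channel_socket_flush_internal()
in the patch above.

#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <sys/socket.h>
#include <linux/errqueue.h>

/* Drain pending zerocopy completion notifications from fd's error queue.
 * Returns 0 on success (or nothing to read in non-blocking mode), -1 on
 * failure with errno set. Hypothetical helper, for illustration only. */
static int drain_zerocopy_errqueue(int fd, bool block)
{
    char control[CMSG_SPACE(sizeof(struct sock_extended_err))];

    for (;;) {
        struct msghdr msg = {0};
        msg.msg_control = control;
        msg.msg_controllen = sizeof(control);

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) {
            if (errno == EINTR) {
                continue;
            }
            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                /* Error-queue reads never block, even on a blocking
                 * socket; an empty queue always yields EAGAIN. */
                if (!block) {
                    return 0;
                }
                struct pollfd pfd = { .fd = fd, .events = 0 };
                poll(&pfd, 1, -1);   /* POLLERR is reported regardless */
                continue;
            }
            return -1;
        }

        for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm != NULL;
             cm = CMSG_NXTHDR(&msg, cm)) {
            struct sock_extended_err *serr =
                (struct sock_extended_err *)CMSG_DATA(cm);
            if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
                continue;
            }
            /* sendmsg() calls ee_info..ee_data have completed; ee_code
             * != SO_EE_CODE_ZEROCOPY_COPIED means the kernel really sent
             * them zerocopy instead of falling back to a copy. */
        }
        return 0;   /* one batch drained; callers loop while work remains */
    }
}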
* Re: [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
2025-04-03 8:21 [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF Manish Mishra
@ 2025-04-14 14:26 ` Fabiano Rosas
2025-04-15 9:20 ` Manish
2025-04-15 9:27 ` Daniel P. Berrangé
From: Fabiano Rosas @ 2025-04-14 14:26 UTC (permalink / raw)
To: Manish Mishra, qemu-devel; +Cc: berrange, peterx, leobras, Manish Mishra
Manish Mishra <manish.mishra@nutanix.com> writes:
> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
> memory is accounted for in the OPTMEM limit. If there is any error while
> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
> queued in the socket error queue. This error queue is freed when userspace
> reads it.
>
> Usually, if there are continuous failures, we merge the metadata into a single
> SKB and free another one. As a result, it never exceeds the OPTMEM limit.
> However, if there is any out-of-order processing or intermittent zerocopy
> failures, this error chain can grow significantly, exhausting the OPTMEM limit.
> As a result, all new sendmsg requests fail to allocate any new SKB, leading to
> an ENOBUF error. Depending on the amount of data queued before the flush
> (i.e., large live migration iterations), even large OPTMEM limits are prone to
> failure.
>
> To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
> we flush the error queue and retry once more.
>
> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
> ---
> include/io/channel-socket.h | 5 +++
> io/channel-socket.c | 74 ++++++++++++++++++++++++++++++-------
> 2 files changed, 65 insertions(+), 14 deletions(-)
>
> V2:
> 1. Removed the dirty_sync_missed_zero_copy migration stat.
> 2. Made the call to qio_channel_socket_flush_internal() from
> qio_channel_socket_writev() non-blocking.
>
> V3:
> 1. Add the dirty_sync_missed_zero_copy migration stat again.
>
> V4:
> 1. Minor nit to rename s/zero_copy_flush_pending/zerocopy_flushed_once.
>
> diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
> index ab15577d38..2c48b972e8 100644
> --- a/include/io/channel-socket.h
> +++ b/include/io/channel-socket.h
> @@ -49,6 +49,11 @@ struct QIOChannelSocket {
> socklen_t remoteAddrLen;
> ssize_t zero_copy_queued;
> ssize_t zero_copy_sent;
> + /**
> + * This flag indicates whether any new data was successfully sent with
> + * zerocopy since the last qio_channel_socket_flush() call.
> + */
> + bool new_zero_copy_sent_success;
> };
>
>
> diff --git a/io/channel-socket.c b/io/channel-socket.c
> index 608bcf066e..d5882c16fe 100644
> --- a/io/channel-socket.c
> +++ b/io/channel-socket.c
> @@ -37,6 +37,12 @@
>
> #define SOCKET_MAX_FDS 16
>
> +#ifdef QEMU_MSG_ZEROCOPY
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> + bool block,
> + Error **errp);
> +#endif
> +
> SocketAddress *
> qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
> Error **errp)
> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
> sioc->fd = -1;
> sioc->zero_copy_queued = 0;
> sioc->zero_copy_sent = 0;
> + sioc->new_zero_copy_sent_success = FALSE;
>
> ioc = QIO_CHANNEL(sioc);
> qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> size_t fdsize = sizeof(int) * nfds;
> struct cmsghdr *cmsg;
> int sflags = 0;
> + bool zerocopy_flushed_once = FALSE;
>
> memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>
> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> goto retry;
> case ENOBUFS:
> if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
> - error_setg_errno(errp, errno,
> - "Process can't lock enough memory for using MSG_ZEROCOPY");
> - return -1;
> + /**
> + * Socket error queueing may exhaust the OPTMEM limit. Try
> + * flushing the error queue once.
> + */
> + if (!zerocopy_flushed_once) {
> + ret = qio_channel_socket_flush_internal(ioc, false, errp);
I'm not following this closely so I might have missed some discussion,
but let me point out that the previous version had a comment regarding
hardcoding 'false' here that I don't see addressed nor any comments
explaining why it wasn't addressed.
> + if (ret < 0) {
> + error_setg_errno(errp, errno,
> + "Zerocopy flush failed");
> + return -1;
> + }
> + zerocopy_flushed_once = TRUE;
> + goto retry;
> + } else {
> + error_setg_errno(errp, errno,
> + "Process can't lock enough memory for "
> + "using MSG_ZEROCOPY");
> + return -1;
> + }
> }
> break;
> }
> @@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>
>
> #ifdef QEMU_MSG_ZEROCOPY
> -static int qio_channel_socket_flush(QIOChannel *ioc,
> - Error **errp)
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> + bool block,
> + Error **errp)
> {
> QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> struct msghdr msg = {};
> @@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
> struct cmsghdr *cm;
> char control[CMSG_SPACE(sizeof(*serr))];
> int received;
> - int ret;
>
> if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
> return 0;
> @@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
> msg.msg_controllen = sizeof(control);
> memset(control, 0, sizeof(control));
>
> - ret = 1;
> -
> while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
> received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
> if (received < 0) {
> switch (errno) {
> case EAGAIN:
> - /* Nothing on errqueue, wait until something is available */
> - qio_channel_wait(ioc, G_IO_ERR);
> - continue;
> + if (block) {
> + /* Nothing on errqueue, wait until something is
> + * available.
> + */
> + qio_channel_wait(ioc, G_IO_ERR);
> + continue;
> + }
> + return 0;
> case EINTR:
> continue;
> default:
> @@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
> /* No errors, count successfully finished sendmsg()*/
> sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
>
> - /* If any sendmsg() succeeded using zero copy, return 0 at the end */
> + /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
> if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
> - ret = 0;
> + sioc->new_zero_copy_sent_success = TRUE;
> }
> }
>
> - return ret;
> + return 0;
> +}
> +
> +static int qio_channel_socket_flush(QIOChannel *ioc,
> + Error **errp)
> +{
> + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> + int ret;
> +
> + ret = qio_channel_socket_flush_internal(ioc, true, errp);
> + if (ret < 0) {
> + return ret;
> + }
> +
> + if (sioc->new_zero_copy_sent_success) {
> + sioc->new_zero_copy_sent_success = FALSE;
> + return 0;
> + }
> +
> + return 1;
> }
>
> #endif /* QEMU_MSG_ZEROCOPY */
* Re: [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
2025-04-14 14:26 ` Fabiano Rosas
@ 2025-04-15 9:20 ` Manish
2025-04-15 9:26 ` Daniel P. Berrangé
From: Manish @ 2025-04-15 9:20 UTC (permalink / raw)
To: Fabiano Rosas, qemu-devel; +Cc: berrange, peterx, leobras
On 14/04/25 7:56 pm, Fabiano Rosas wrote:
>
> Manish Mishra <manish.mishra@nutanix.com> writes:
>
>> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
>> memory is accounted for in the OPTMEM limit. If there is any error while
>> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
>> queued in the socket error queue. This error queue is freed when userspace
>> reads it.
>>
>> Usually, if there are continuous failures, we merge the metadata into a single
>> SKB and free another one. As a result, it never exceeds the OPTMEM limit.
>> However, if there is any out-of-order processing or intermittent zerocopy
>> failures, this error chain can grow significantly, exhausting the OPTMEM limit.
>> As a result, all new sendmsg requests fail to allocate any new SKB, leading to
>> an ENOBUF error. Depending on the amount of data queued before the flush
>> (i.e., large live migration iterations), even large OPTMEM limits are prone to
>> failure.
>>
>> To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
>> we flush the error queue and retry once more.
>>
>> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
>> ---
>> include/io/channel-socket.h | 5 +++
>> io/channel-socket.c | 74 ++++++++++++++++++++++++++++++-------
>> 2 files changed, 65 insertions(+), 14 deletions(-)
>>
>> V2:
>> 1. Removed the dirty_sync_missed_zero_copy migration stat.
>> 2. Made the call to qio_channel_socket_flush_internal() from
>> qio_channel_socket_writev() non-blocking.
>>
>> V3:
>> 1. Add the dirty_sync_missed_zero_copy migration stat again.
>>
>> V4:
>> 1. Minor nit to rename s/zero_copy_flush_pending/zerocopy_flushed_once.
>>
>> diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
>> index ab15577d38..2c48b972e8 100644
>> --- a/include/io/channel-socket.h
>> +++ b/include/io/channel-socket.h
>> @@ -49,6 +49,11 @@ struct QIOChannelSocket {
>> socklen_t remoteAddrLen;
>> ssize_t zero_copy_queued;
>> ssize_t zero_copy_sent;
>> + /**
>> + * This flag indicates whether any new data was successfully sent with
>> + * zerocopy since the last qio_channel_socket_flush() call.
>> + */
>> + bool new_zero_copy_sent_success;
>> };
>>
>>
>> diff --git a/io/channel-socket.c b/io/channel-socket.c
>> index 608bcf066e..d5882c16fe 100644
>> --- a/io/channel-socket.c
>> +++ b/io/channel-socket.c
>> @@ -37,6 +37,12 @@
>>
>> #define SOCKET_MAX_FDS 16
>>
>> +#ifdef QEMU_MSG_ZEROCOPY
>> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
>> + bool block,
>> + Error **errp);
>> +#endif
>> +
>> SocketAddress *
>> qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
>> Error **errp)
>> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
>> sioc->fd = -1;
>> sioc->zero_copy_queued = 0;
>> sioc->zero_copy_sent = 0;
>> + sioc->new_zero_copy_sent_success = FALSE;
>>
>> ioc = QIO_CHANNEL(sioc);
>> qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
>> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>> size_t fdsize = sizeof(int) * nfds;
>> struct cmsghdr *cmsg;
>> int sflags = 0;
>> + bool zerocopy_flushed_once = FALSE;
>>
>> memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>>
>> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>> goto retry;
>> case ENOBUFS:
>> if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
>> - error_setg_errno(errp, errno,
>> - "Process can't lock enough memory for using MSG_ZEROCOPY");
>> - return -1;
>> + /**
>> + * Socket error queueing may exhaust the OPTMEM limit. Try
>> + * flushing the error queue once.
>> + */
>> + if (!zerocopy_flushed_once) {
>> + ret = qio_channel_socket_flush_internal(ioc, false, errp);
> I'm not following this closely so I might have missed some discussion,
> but let me point out that the previous version had a comment regarding
> hardcoding 'false' here that I don't see addressed nor any comments
> explaining why it wasn't addressed.
Hi Fabiano, I did reply to that in the last comment for v3. Please let me
know if that does not make sense.
https://lore.kernel.org/all/c7a86623-db04-459f-afd5-6a318475bb92@nutanix.com/T/
>
>> + if (ret < 0) {
>> + error_setg_errno(errp, errno,
>> + "Zerocopy flush failed");
>> + return -1;
>> + }
>> + zerocopy_flushed_once = TRUE;
>> + goto retry;
>> + } else {
>> + error_setg_errno(errp, errno,
>> + "Process can't lock enough memory for "
>> + "using MSG_ZEROCOPY");
>> + return -1;
>> + }
>> }
>> break;
>> }
>> @@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>>
>>
>> #ifdef QEMU_MSG_ZEROCOPY
>> -static int qio_channel_socket_flush(QIOChannel *ioc,
>> - Error **errp)
>> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
>> + bool block,
>> + Error **errp)
>> {
>> QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
>> struct msghdr msg = {};
>> @@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>> struct cmsghdr *cm;
>> char control[CMSG_SPACE(sizeof(*serr))];
>> int received;
>> - int ret;
>>
>> if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
>> return 0;
>> @@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>> msg.msg_controllen = sizeof(control);
>> memset(control, 0, sizeof(control));
>>
>> - ret = 1;
>> -
>> while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
>> received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
>> if (received < 0) {
>> switch (errno) {
>> case EAGAIN:
>> - /* Nothing on errqueue, wait until something is available */
>> - qio_channel_wait(ioc, G_IO_ERR);
>> - continue;
>> + if (block) {
>> + /* Nothing on errqueue, wait until something is
>> + * available.
>> + */
>> + qio_channel_wait(ioc, G_IO_ERR);
>> + continue;
>> + }
>> + return 0;
>> case EINTR:
>> continue;
>> default:
>> @@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>> /* No errors, count successfully finished sendmsg()*/
>> sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
>>
>> - /* If any sendmsg() succeeded using zero copy, return 0 at the end */
>> + /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
>> if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
>> - ret = 0;
>> + sioc->new_zero_copy_sent_success = TRUE;
>> }
>> }
>>
>> - return ret;
>> + return 0;
>> +}
>> +
>> +static int qio_channel_socket_flush(QIOChannel *ioc,
>> + Error **errp)
>> +{
>> + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
>> + int ret;
>> +
>> + ret = qio_channel_socket_flush_internal(ioc, true, errp);
>> + if (ret < 0) {
>> + return ret;
>> + }
>> +
>> + if (sioc->new_zero_copy_sent_success) {
>> + sioc->new_zero_copy_sent_success = FALSE;
>> + return 0;
>> + }
>> +
>> + return 1;
>> }
>>
>> #endif /* QEMU_MSG_ZEROCOPY */
Thanks
Manish Mishra
* Re: [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
2025-04-15 9:20 ` Manish
@ 2025-04-15 9:26 ` Daniel P. Berrangé
2025-04-15 9:33 ` Manish
From: Daniel P. Berrangé @ 2025-04-15 9:26 UTC (permalink / raw)
To: Manish; +Cc: Fabiano Rosas, qemu-devel, peterx, leobras
On Tue, Apr 15, 2025 at 02:50:39PM +0530, Manish wrote:
>
> On 14/04/25 7:56 pm, Fabiano Rosas wrote:
> >
> > Manish Mishra <manish.mishra@nutanix.com> writes:
> >
> > > We allocate extra metadata SKBs in case of a zerocopy send. This metadata
> > > memory is accounted for in the OPTMEM limit. If there is any error while
> > > sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
> > > queued in the socket error queue. This error queue is freed when userspace
> > > reads it.
> > >
> > > Usually, if there are continuous failures, we merge the metadata into a single
> > > SKB and free another one. As a result, it never exceeds the OPTMEM limit.
> > > However, if there is any out-of-order processing or intermittent zerocopy
> > > failures, this error chain can grow significantly, exhausting the OPTMEM limit.
> > > As a result, all new sendmsg requests fail to allocate any new SKB, leading to
> > > an ENOBUF error. Depending on the amount of data queued before the flush
> > > (i.e., large live migration iterations), even large OPTMEM limits are prone to
> > > failure.
> > >
> > > To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
> > > we flush the error queue and retry once more.
> > >
> > > Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
> > > ---
> > > include/io/channel-socket.h | 5 +++
> > > io/channel-socket.c | 74 ++++++++++++++++++++++++++++++-------
> > > 2 files changed, 65 insertions(+), 14 deletions(-)
> > >
> > > V2:
> > > 1. Removed the dirty_sync_missed_zero_copy migration stat.
> > > 2. Made the call to qio_channel_socket_flush_internal() from
> > > qio_channel_socket_writev() non-blocking.
> > >
> > > V3:
> > > 1. Add the dirty_sync_missed_zero_copy migration stat again.
> > >
> > > V4:
> > > 1. Minor nit to rename s/zero_copy_flush_pending/zerocopy_flushed_once.
> > >
> > > diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
> > > index ab15577d38..2c48b972e8 100644
> > > --- a/include/io/channel-socket.h
> > > +++ b/include/io/channel-socket.h
> > > @@ -49,6 +49,11 @@ struct QIOChannelSocket {
> > > socklen_t remoteAddrLen;
> > > ssize_t zero_copy_queued;
> > > ssize_t zero_copy_sent;
> > > + /**
> > > + * This flag indicates whether any new data was successfully sent with
> > > + * zerocopy since the last qio_channel_socket_flush() call.
> > > + */
> > > + bool new_zero_copy_sent_success;
> > > };
> > > diff --git a/io/channel-socket.c b/io/channel-socket.c
> > > index 608bcf066e..d5882c16fe 100644
> > > --- a/io/channel-socket.c
> > > +++ b/io/channel-socket.c
> > > @@ -37,6 +37,12 @@
> > > #define SOCKET_MAX_FDS 16
> > > +#ifdef QEMU_MSG_ZEROCOPY
> > > +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> > > + bool block,
> > > + Error **errp);
> > > +#endif
> > > +
> > > SocketAddress *
> > > qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
> > > Error **errp)
> > > @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
> > > sioc->fd = -1;
> > > sioc->zero_copy_queued = 0;
> > > sioc->zero_copy_sent = 0;
> > > + sioc->new_zero_copy_sent_success = FALSE;
> > > ioc = QIO_CHANNEL(sioc);
> > > qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
> > > @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> > > size_t fdsize = sizeof(int) * nfds;
> > > struct cmsghdr *cmsg;
> > > int sflags = 0;
> > > + bool zerocopy_flushed_once = FALSE;
> > > memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
> > > @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> > > goto retry;
> > > case ENOBUFS:
> > > if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
> > > - error_setg_errno(errp, errno,
> > > - "Process can't lock enough memory for using MSG_ZEROCOPY");
> > > - return -1;
> > > + /**
> > > + * Socket error queueing may exhaust the OPTMEM limit. Try
> > > + * flushing the error queue once.
> > > + */
> > > + if (!zerocopy_flushed_once) {
> > > + ret = qio_channel_socket_flush_internal(ioc, false, errp);
> > I'm not following this closely so I might have missed some discussion,
> > but let me point out that the previous version had a comment regarding
> > hardcoding 'false' here that I don't see addressed nor any comments
> > explaining why it wasn't addressed.
>
> Hi Fabiano, I did reply to that in the last comment for v3. Please let me know
> if that does not make sense. https://lore.kernel.org/all/c7a86623-db04-459f-afd5-6a318475bb92@nutanix.com/T/
That comment doesn't really address the problem.
If the socket is in blocking mode, we *MUST* block to send
all data. Returning early with a partial send when zerocopy
buffers are full isn't matching the requested semantics
for blocking mode.
>
>
> >
> > > + if (ret < 0) {
> > > + error_setg_errno(errp, errno,
> > > + "Zerocopy flush failed");
> > > + return -1;
> > > + }
> > > + zerocopy_flushed_once = TRUE;
> > > + goto retry;
> > > + } else {
> > > + error_setg_errno(errp, errno,
> > > + "Process can't lock enough memory for "
> > > + "using MSG_ZEROCOPY");
> > > + return -1;
> > > + }
> > > }
> > > break;
With regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
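
To make Daniel's point concrete, here is a stand-alone sketch (hypothetical
names, reusing the drain helper sketched after the patch) of what
blocking-mode semantics require: on ENOBUFS the sender must drain completions
and keep going, never hand back a short write.

#include <errno.h>
#include <stdbool.h>
#include <sys/socket.h>

int drain_zerocopy_errqueue(int fd, bool block);  /* see earlier sketch */

/* Send the whole buffer on a blocking socket with MSG_ZEROCOPY (assumes
 * SO_ZEROCOPY was already enabled on fd). Illustrative only. */
static ssize_t send_all_zerocopy(int fd, const char *buf, size_t len)
{
    size_t done = 0;

    while (done < len) {
        ssize_t n = send(fd, buf + done, len - done, MSG_ZEROCOPY);
        if (n < 0) {
            if (errno == EINTR) {
                continue;
            }
            if (errno == ENOBUFS) {
                /* Must wait for completions and retry: returning `done`
                 * here would be a partial send, which blocking mode
                 * forbids. */
                if (drain_zerocopy_errqueue(fd, true) < 0) {
                    return -1;
                }
                continue;
            }
            return -1;  /* caller inspects errno */
        }
        done += (size_t)n;
    }
    return (ssize_t)done;
}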
* Re: [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
2025-04-15 9:26 ` Daniel P. Berrangé
@ 2025-04-15 9:33 ` Manish
From: Manish @ 2025-04-15 9:33 UTC (permalink / raw)
To: Daniel P. Berrangé; +Cc: Fabiano Rosas, qemu-devel, peterx, leobras
On 15/04/25 2:56 pm, Daniel P. Berrangé wrote:
>
> On Tue, Apr 15, 2025 at 02:50:39PM +0530, Manish wrote:
>> On 14/04/25 7:56 pm, Fabiano Rosas wrote:
>>>
>>> Manish Mishra <manish.mishra@nutanix.com> writes:
>>>
>>>> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
>>>> memory is accounted for in the OPTMEM limit. If there is any error while
>>>> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
>>>> queued in the socket error queue. This error queue is freed when userspace
>>>> reads it.
>>>>
>>>> Usually, if there are continuous failures, we merge the metadata into a single
>>>> SKB and free another one. As a result, it never exceeds the OPTMEM limit.
>>>> However, if there is any out-of-order processing or intermittent zerocopy
>>>> failures, this error chain can grow significantly, exhausting the OPTMEM limit.
>>>> As a result, all new sendmsg requests fail to allocate any new SKB, leading to
>>>> an ENOBUF error. Depending on the amount of data queued before the flush
>>>> (i.e., large live migration iterations), even large OPTMEM limits are prone to
>>>> failure.
>>>>
>>>> To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
>>>> we flush the error queue and retry once more.
>>>>
>>>> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
>>>> ---
>>>> include/io/channel-socket.h | 5 +++
>>>> io/channel-socket.c | 74 ++++++++++++++++++++++++++++++-------
>>>> 2 files changed, 65 insertions(+), 14 deletions(-)
>>>>
>>>> V2:
>>>> 1. Removed the dirty_sync_missed_zero_copy migration stat.
>>>> 2. Made the call to qio_channel_socket_flush_internal() from
>>>> qio_channel_socket_writev() non-blocking.
>>>>
>>>> V3:
>>>> 1. Add the dirty_sync_missed_zero_copy migration stat again.
>>>>
>>>> V4:
>>>> 1. Minor nit to rename s/zero_copy_flush_pending/zerocopy_flushed_once.
>>>>
>>>> diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
>>>> index ab15577d38..2c48b972e8 100644
>>>> --- a/include/io/channel-socket.h
>>>> +++ b/include/io/channel-socket.h
>>>> @@ -49,6 +49,11 @@ struct QIOChannelSocket {
>>>> socklen_t remoteAddrLen;
>>>> ssize_t zero_copy_queued;
>>>> ssize_t zero_copy_sent;
>>>> + /**
>>>> + * This flag indicates whether any new data was successfully sent with
>>>> + * zerocopy since the last qio_channel_socket_flush() call.
>>>> + */
>>>> + bool new_zero_copy_sent_success;
>>>> };
>>>> diff --git a/io/channel-socket.c b/io/channel-socket.c
>>>> index 608bcf066e..d5882c16fe 100644
>>>> --- a/io/channel-socket.c
>>>> +++ b/io/channel-socket.c
>>>> @@ -37,6 +37,12 @@
>>>> #define SOCKET_MAX_FDS 16
>>>> +#ifdef QEMU_MSG_ZEROCOPY
>>>> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
>>>> + bool block,
>>>> + Error **errp);
>>>> +#endif
>>>> +
>>>> SocketAddress *
>>>> qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
>>>> Error **errp)
>>>> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
>>>> sioc->fd = -1;
>>>> sioc->zero_copy_queued = 0;
>>>> sioc->zero_copy_sent = 0;
>>>> + sioc->new_zero_copy_sent_success = FALSE;
>>>> ioc = QIO_CHANNEL(sioc);
>>>> qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
>>>> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>>>> size_t fdsize = sizeof(int) * nfds;
>>>> struct cmsghdr *cmsg;
>>>> int sflags = 0;
>>>> + bool zerocopy_flushed_once = FALSE;
>>>> memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>>>> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>>>> goto retry;
>>>> case ENOBUFS:
>>>> if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
>>>> - error_setg_errno(errp, errno,
>>>> - "Process can't lock enough memory for using MSG_ZEROCOPY");
>>>> - return -1;
>>>> + /**
>>>> + * Socket error queueing may exhaust the OPTMEM limit. Try
>>>> + * flushing the error queue once.
>>>> + */
>>>> + if (!zerocopy_flushed_once) {
>>>> + ret = qio_channel_socket_flush_internal(ioc, false, errp);
>>> I'm not following this closely so I might have missed some discussion,
>>> but let me point out that the previous version had a comment regarding
>>> hardcoding 'false' here that I don't see addressed nor any comments
>>> explaining why it wasn't addressed.
>> Hi Fabiano, I did reply to that in the last comment for v3. Please let me know
>> if that does not make sense. https://lore.kernel.org/all/c7a86623-db04-459f-afd5-6a318475bb92@nutanix.com/T/
> That comment doesn't really address the problem.
>
> If the socket is in blocking mode, we *MUST* block to send
> all data. Returning early with a partial send when zerocopy
> buffers are full isn't matching the requested semantics
> for blocking mode.
>
>>
>>>> + if (ret < 0) {
>>>> + error_setg_errno(errp, errno,
>>>> + "Zerocopy flush failed");
>>>> + return -1;
>>>> + }
>>>> + zerocopy_flushed_once = TRUE;
>>>> + goto retry;
>>>> + } else {
>>>> + error_setg_errno(errp, errno,
>>>> + "Process can't lock enough memory for "
>>>> + "using MSG_ZEROCOPY");
>>>> + return -1;
>>>> + }
>>>> }
>>>> break;
> With regards,
> Daniel
Sure Daniel
Thanks
Manish Mishra
* Re: [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
2025-04-03 8:21 [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF Manish Mishra
2025-04-14 14:26 ` Fabiano Rosas
@ 2025-04-15 9:27 ` Daniel P. Berrangé
From: Daniel P. Berrangé @ 2025-04-15 9:27 UTC (permalink / raw)
To: Manish Mishra; +Cc: qemu-devel, peterx, leobras, farosas
On Thu, Apr 03, 2025 at 04:21:21AM -0400, Manish Mishra wrote:
> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
> memory is accounted for in the OPTMEM limit. If there is any error while
> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
> queued in the socket error queue. This error queue is freed when userspace
> reads it.
>
> Usually, if there are continuous failures, we merge the metadata into a single
> SKB and free another one. As a result, it never exceeds the OPTMEM limit.
> However, if there is any out-of-order processing or intermittent zerocopy
> failures, this error chain can grow significantly, exhausting the OPTMEM limit.
> As a result, all new sendmsg requests fail to allocate any new SKB, leading to
> an ENOBUF error. Depending on the amount of data queued before the flush
> (i.e., large live migration iterations), even large OPTMEM limits are prone to
> failure.
>
> To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
> we flush the error queue and retry once more.
>
> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
> ---
> include/io/channel-socket.h | 5 +++
> io/channel-socket.c | 74 ++++++++++++++++++++++++++++++-------
> 2 files changed, 65 insertions(+), 14 deletions(-)
>
> V2:
> 1. Removed the dirty_sync_missed_zero_copy migration stat.
> 2. Made the call to qio_channel_socket_flush_internal() from
> qio_channel_socket_writev() non-blocking.
>
> V3:
> 1. Add the dirty_sync_missed_zero_copy migration stat again.
>
> V4:
> 1. Minor nit to rename s/zero_copy_flush_pending/zerocopy_flushed_once.
>
> diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
> index ab15577d38..2c48b972e8 100644
> --- a/include/io/channel-socket.h
> +++ b/include/io/channel-socket.h
> @@ -49,6 +49,11 @@ struct QIOChannelSocket {
> socklen_t remoteAddrLen;
> ssize_t zero_copy_queued;
> ssize_t zero_copy_sent;
> + /**
> + * This flag indicates whether any new data was successfully sent with
> + * zerocopy since the last qio_channel_socket_flush() call.
> + */
> + bool new_zero_copy_sent_success;
> };
>
>
> diff --git a/io/channel-socket.c b/io/channel-socket.c
> index 608bcf066e..d5882c16fe 100644
> --- a/io/channel-socket.c
> +++ b/io/channel-socket.c
> @@ -37,6 +37,12 @@
>
> #define SOCKET_MAX_FDS 16
>
> +#ifdef QEMU_MSG_ZEROCOPY
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> + bool block,
> + Error **errp);
> +#endif
> +
> SocketAddress *
> qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
> Error **errp)
> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
> sioc->fd = -1;
> sioc->zero_copy_queued = 0;
> sioc->zero_copy_sent = 0;
> + sioc->new_zero_copy_sent_success = FALSE;
>
> ioc = QIO_CHANNEL(sioc);
> qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> size_t fdsize = sizeof(int) * nfds;
> struct cmsghdr *cmsg;
> int sflags = 0;
> + bool zerocopy_flushed_once = FALSE;
>
> memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>
> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> goto retry;
> case ENOBUFS:
> if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
> - error_setg_errno(errp, errno,
> - "Process can't lock enough memory for using MSG_ZEROCOPY");
> - return -1;
> + /**
> + * Socket error queueing may exhaust the OPTMEM limit. Try
> + * flushing the error queue once.
> + */
> + if (!zerocopy_flushed_once) {
> + ret = qio_channel_socket_flush_internal(ioc, false, errp);
Passing in 'errp', so the error will be set on failure of this method...
> + if (ret < 0) {
> + error_setg_errno(errp, errno,
> + "Zerocopy flush failed");
...but we now try to overwrite the already-set error. This error_setg_errno
call should be removed.
> + return -1;
> + }
> + zerocopy_flushed_once = TRUE;
> + goto retry;
> + } else {
> + error_setg_errno(errp, errno,
> + "Process can't lock enough memory for "
> + "using MSG_ZEROCOPY");
> + return -1;
> + }
> }
> break;
> }
With regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
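
For clarity, the fix Daniel is asking for amounts to deleting the inner
error_setg_errno() call, since the flush already populated *errp. The ENOBUFS
branch would then read roughly as follows (an editorial reconstruction, not
code from the thread):

            case ENOBUFS:
                if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
                    /*
                     * Socket error queueing may exhaust the OPTMEM limit.
                     * Try flushing the error queue once.
                     */
                    if (!zerocopy_flushed_once) {
                        ret = qio_channel_socket_flush_internal(ioc, false,
                                                                errp);
                        if (ret < 0) {
                            /* errp was already set by the flush */
                            return -1;
                        }
                        zerocopy_flushed_once = TRUE;
                        goto retry;
                    }
                    error_setg_errno(errp, errno,
                                     "Process can't lock enough memory for "
                                     "using MSG_ZEROCOPY");
                    return -1;
                }
                break;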
Thread overview (6 messages):
2025-04-03 8:21 [PATCH v4] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF Manish Mishra
2025-04-14 14:26 ` Fabiano Rosas
2025-04-15 9:20 ` Manish
2025-04-15 9:26 ` Daniel P. Berrangé
2025-04-15 9:33 ` Manish
2025-04-15 9:27 ` Daniel P. Berrangé