netdev.vger.kernel.org archive mirror
* [PATCH] skbuff: Improve the sending efficiency of __skb_send_sock
@ 2025-06-23  8:42 Feng Yang
  2025-06-25 18:35 ` Stanislav Fomichev
  0 siblings, 1 reply; 4+ messages in thread
From: Feng Yang @ 2025-06-23  8:42 UTC (permalink / raw)
  To: davem, edumazet, kuba, pabeni, horms, willemb, almasrymina,
	kerneljasonxing, ebiggers, asml.silence, aleksander.lobakin
  Cc: yangfeng, netdev, linux-kernel

From: Feng Yang <yangfeng@kylinos.cn>

Aggregate skb data into a bvec array and hand it to a single sendmsg call.
When sockmap is used to forward large packets, what previously required
multiple transmissions now needs only one, which significantly improves
performance. For small packets, performance remains comparable to the
original level.

When using sockmap for forwarding, the average latency for each packet
size (in bytes) after sending 10,000 packets is as follows:
size	old(us)		new(us)
512	56		55
1472	58		58
1600	106		79
3000	145		108
5000	182		123
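
The mechanism, in userspace terms: instead of one send call per data
region, describe every region in a vector and issue a single
scatter-gather call. A minimal userspace analogue using the plain socket
API — an illustration of the same idea only, not the kernel code below:

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	/* Send two separate buffers with one syscall via scatter-gather I/O. */
	static ssize_t send_two_buffers(int fd, void *a, size_t alen,
					void *b, size_t blen)
	{
		struct iovec iov[2] = {
			{ .iov_base = a, .iov_len = alen },
			{ .iov_base = b, .iov_len = blen },
		};
		struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

		return sendmsg(fd, &msg, MSG_DONTWAIT);
	}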

Signed-off-by: Feng Yang <yangfeng@kylinos.cn>
---
 net/core/skbuff.c | 112 +++++++++++++++++++++-------------------------
 1 file changed, 52 insertions(+), 60 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 85fc82f72d26..664443fc9baf 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3235,84 +3235,75 @@ typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
 			   int len, sendmsg_func sendmsg, int flags)
 {
-	unsigned int orig_len = len;
 	struct sk_buff *head = skb;
 	unsigned short fragidx;
-	int slen, ret;
+	struct msghdr msg;
+	struct bio_vec *bvec;
+	int max_vecs, ret, slen;
+	int bvec_count = 0;
+	unsigned int copied = 0;
 
-do_frag_list:
-
-	/* Deal with head data */
-	while (offset < skb_headlen(skb) && len) {
-		struct kvec kv;
-		struct msghdr msg;
-
-		slen = min_t(int, len, skb_headlen(skb) - offset);
-		kv.iov_base = skb->data + offset;
-		kv.iov_len = slen;
-		memset(&msg, 0, sizeof(msg));
-		msg.msg_flags = MSG_DONTWAIT | flags;
-
-		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
-		ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
-				      sendmsg_unlocked, sk, &msg);
-		if (ret <= 0)
-			goto error;
+	max_vecs = skb_shinfo(skb)->nr_frags + 1; // +1 for linear data
+	if (skb_has_frag_list(skb)) {
+		struct sk_buff *frag_skb = skb_shinfo(skb)->frag_list;
 
-		offset += ret;
-		len -= ret;
+		while (frag_skb) {
+			max_vecs += skb_shinfo(frag_skb)->nr_frags + 1; // +1 for linear data
+			frag_skb = frag_skb->next;
+		}
 	}
 
-	/* All the data was skb head? */
-	if (!len)
-		goto out;
+	bvec = kcalloc(max_vecs, sizeof(struct bio_vec), GFP_KERNEL);
+	if (!bvec)
+		return -ENOMEM;
 
-	/* Make offset relative to start of frags */
-	offset -= skb_headlen(skb);
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | flags;
+
+do_frag_list:
 
-	/* Find where we are in frag list */
-	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
-		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
+	/* Deal with head data */
+	if (offset < skb_headlen(skb)) {
+		slen = min_t(int, skb_headlen(skb) - offset, len - copied);
+		struct page *page = virt_to_page(skb->data + offset);
+		unsigned int page_offset = offset_in_page(skb->data + offset);
 
-		if (offset < skb_frag_size(frag))
-			break;
+		if (!sendpage_ok(page))
+			msg.msg_flags &= ~MSG_SPLICE_PAGES;
 
-		offset -= skb_frag_size(frag);
+		bvec_set_page(&bvec[bvec_count++], page, slen, page_offset);
+		copied += slen;
+		offset += slen;
 	}
 
-	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
-		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
+	/* Make offset relative to start of frags */
+	offset -= skb_headlen(skb);
 
-		slen = min_t(size_t, len, skb_frag_size(frag) - offset);
+	if (copied < len) {
+		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
+			skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
+			unsigned int frag_size = skb_frag_size(frag);
 
-		while (slen) {
-			struct bio_vec bvec;
-			struct msghdr msg = {
-				.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT |
-					     flags,
-			};
+			/* Find where we are in frag list */
+			if (offset >= frag_size) {
+				offset -= frag_size;
+				continue;
+			}
 
-			bvec_set_page(&bvec, skb_frag_page(frag), slen,
+			slen = min_t(size_t, frag_size - offset, len - copied);
+			bvec_set_page(&bvec[bvec_count++], skb_frag_page(frag), slen,
 				      skb_frag_off(frag) + offset);
-			iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
-				      slen);
 
-			ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
-					      sendmsg_unlocked, sk, &msg);
-			if (ret <= 0)
-				goto error;
+			copied += slen;
+			offset = 0;
 
-			len -= ret;
-			offset += ret;
-			slen -= ret;
+			if (copied >= len)
+				break;
 		}
-
-		offset = 0;
 	}
 
-	if (len) {
+	if (copied < len) {
 		/* Process any frag lists */
-
 		if (skb == head) {
 			if (skb_has_frag_list(skb)) {
 				skb = skb_shinfo(skb)->frag_list;
@@ -3324,11 +3315,12 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
 		}
 	}
 
-out:
-	return orig_len - len;
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, bvec_count, len);
+	ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, sendmsg_unlocked, sk, &msg);
+
+	kfree(bvec);
 
-error:
-	return orig_len == len ? ret : orig_len - len;
+	return ret;
 }
 
 /* Send skb data on a socket. Socket must be locked. */
-- 
2.43.0
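
One detail worth noting in the hunk above: the skb linear area may sit in
slab memory, which must not be handed to MSG_SPLICE_PAGES, hence the
sendpage_ok() guard that falls back to copying for the head. That helper
lives in include/linux/net.h and reads essentially as follows (paraphrased
from the tree this patch is against):

	static inline bool sendpage_ok(struct page *page)
	{
		return !PageSlab(page) && page_count(page) >= 1;
	}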


* Re: [PATCH] skbuff: Improve the sending efficiency of __skb_send_sock
  2025-06-23  8:42 [PATCH] skbuff: Improve the sending efficiency of __skb_send_sock Feng Yang
@ 2025-06-25 18:35 ` Stanislav Fomichev
  2025-06-26  7:50   ` Feng Yang
  0 siblings, 1 reply; 4+ messages in thread
From: Stanislav Fomichev @ 2025-06-25 18:35 UTC (permalink / raw)
  To: Feng Yang
  Cc: davem, edumazet, kuba, pabeni, horms, willemb, almasrymina,
	kerneljasonxing, ebiggers, asml.silence, aleksander.lobakin,
	yangfeng, netdev, linux-kernel

On 06/23, Feng Yang wrote:
> From: Feng Yang <yangfeng@kylinos.cn>
> 
> Aggregate skb data into a bvec array and hand it to a single sendmsg call.
> When sockmap is used to forward large packets, what previously required
> multiple transmissions now needs only one, which significantly improves
> performance. For small packets, performance remains comparable to the
> original level.
> 
> When using sockmap for forwarding, the average latency for each packet
> size (in bytes) after sending 10,000 packets is as follows:
> size	old(us)		new(us)
> 512	56		55
> 1472	58		58
> 1600	106		79
> 3000	145		108
> 5000	182		123
> 
> Signed-off-by: Feng Yang <yangfeng@kylinos.cn>
> ---
>  net/core/skbuff.c | 112 +++++++++++++++++++++-------------------------
>  1 file changed, 52 insertions(+), 60 deletions(-)
> 
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 85fc82f72d26..664443fc9baf 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -3235,84 +3235,75 @@ typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
>  static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
>  			   int len, sendmsg_func sendmsg, int flags)
>  {
> -	unsigned int orig_len = len;
>  	struct sk_buff *head = skb;
>  	unsigned short fragidx;
> -	int slen, ret;
> +	struct msghdr msg;
> +	struct bio_vec *bvec;
> +	int max_vecs, ret, slen;
> +	int bvec_count = 0;
> +	unsigned int copied = 0;
>  
> -do_frag_list:
> -
> -	/* Deal with head data */
> -	while (offset < skb_headlen(skb) && len) {
> -		struct kvec kv;
> -		struct msghdr msg;
> -
> -		slen = min_t(int, len, skb_headlen(skb) - offset);
> -		kv.iov_base = skb->data + offset;
> -		kv.iov_len = slen;
> -		memset(&msg, 0, sizeof(msg));
> -		msg.msg_flags = MSG_DONTWAIT | flags;
> -
> -		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
> -		ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
> -				      sendmsg_unlocked, sk, &msg);
> -		if (ret <= 0)
> -			goto error;
> +	max_vecs = skb_shinfo(skb)->nr_frags + 1; // +1 for linear data
> +	if (skb_has_frag_list(skb)) {
> +		struct sk_buff *frag_skb = skb_shinfo(skb)->frag_list;
>  
> -		offset += ret;
> -		len -= ret;
> +		while (frag_skb) {
> +			max_vecs += skb_shinfo(frag_skb)->nr_frags + 1; // +1 for linear data
> +			frag_skb = frag_skb->next;
> +		}
>  	}
>  
> -	/* All the data was skb head? */
> -	if (!len)
> -		goto out;
> +	bvec = kcalloc(max_vecs, sizeof(struct bio_vec), GFP_KERNEL);
> +	if (!bvec)
> +		return -ENOMEM;

Not sure allocating memory here is a good idea. From what I can tell
this function is used by non-sockmap callers as well..
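
For context, __skb_send_sock sits behind generic wrappers, so every user
of those wrappers would pay for the allocation too. In the tree this patch
is against, the wrappers look roughly like this (paraphrased from
net/core/skbuff.c; espintcp, for instance, sends through the locked
variant, while sockmap's backlog path uses the unlocked one):

	/* Caller must already hold the socket lock. */
	int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb,
				 int offset, int len)
	{
		return __skb_send_sock(sk, skb, offset, len,
				       sendmsg_locked, 0);
	}

	/* Variant for callers that do not hold the socket lock. */
	int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
			  int len)
	{
		return __skb_send_sock(sk, skb, offset, len,
				       sendmsg_unlocked, 0);
	}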

* Re: [PATCH] skbuff: Improve the sending efficiency of __skb_send_sock
  2025-06-25 18:35 ` Stanislav Fomichev
@ 2025-06-26  7:50   ` Feng Yang
  2025-06-26  8:31     ` Paolo Abeni
  0 siblings, 1 reply; 4+ messages in thread
From: Feng Yang @ 2025-06-26  7:50 UTC (permalink / raw)
  To: stfomichev
  Cc: aleksander.lobakin, almasrymina, asml.silence, davem, ebiggers,
	edumazet, horms, kerneljasonxing, kuba, linux-kernel, netdev,
	pabeni, willemb, yangfeng59949, yangfeng

On Wed, 25 Jun 2025 11:35:55 -0700, Stanislav Fomichev <stfomichev@gmail.com> wrote:

> On 06/23, Feng Yang wrote:
> > From: Feng Yang <yangfeng@kylinos.cn>
> > 
> > Aggregate skb data into a bvec array and hand it to a single sendmsg call.
> > When sockmap is used to forward large packets, what previously required
> > multiple transmissions now needs only one, which significantly improves
> > performance. For small packets, performance remains comparable to the
> > original level.
> > 
> > When using sockmap for forwarding, the average latency for each packet
> > size (in bytes) after sending 10,000 packets is as follows:
> > size	old(us)		new(us)
> > 512	56		55
> > 1472	58		58
> > 1600	106		79
> > 3000	145		108
> > 5000	182		123
> > 
> > Signed-off-by: Feng Yang <yangfeng@kylinos.cn>
> > ---
> >  net/core/skbuff.c | 112 +++++++++++++++++++++-------------------------
> >  1 file changed, 52 insertions(+), 60 deletions(-)
> > 
> > diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> > index 85fc82f72d26..664443fc9baf 100644
> > --- a/net/core/skbuff.c
> > +++ b/net/core/skbuff.c
> > @@ -3235,84 +3235,75 @@ typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
> >  static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
> >  			   int len, sendmsg_func sendmsg, int flags)
> >  {
> > -	unsigned int orig_len = len;
> >  	struct sk_buff *head = skb;
> >  	unsigned short fragidx;
> > -	int slen, ret;
> > +	struct msghdr msg;
> > +	struct bio_vec *bvec;
> > +	int max_vecs, ret, slen;
> > +	int bvec_count = 0;
> > +	unsigned int copied = 0;
> >  
> > -do_frag_list:
> > -
> > -	/* Deal with head data */
> > -	while (offset < skb_headlen(skb) && len) {
> > -		struct kvec kv;
> > -		struct msghdr msg;
> > -
> > -		slen = min_t(int, len, skb_headlen(skb) - offset);
> > -		kv.iov_base = skb->data + offset;
> > -		kv.iov_len = slen;
> > -		memset(&msg, 0, sizeof(msg));
> > -		msg.msg_flags = MSG_DONTWAIT | flags;
> > -
> > -		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
> > -		ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
> > -				      sendmsg_unlocked, sk, &msg);
> > -		if (ret <= 0)
> > -			goto error;
> > +	max_vecs = skb_shinfo(skb)->nr_frags + 1; // +1 for linear data
> > +	if (skb_has_frag_list(skb)) {
> > +		struct sk_buff *frag_skb = skb_shinfo(skb)->frag_list;
> >  
> > -		offset += ret;
> > -		len -= ret;
> > +		while (frag_skb) {
> > +			max_vecs += skb_shinfo(frag_skb)->nr_frags + 1; // +1 for linear data
> > +			frag_skb = frag_skb->next;
> > +		}
> >  	}
> >  
> > -	/* All the data was skb head? */
> > -	if (!len)
> > -		goto out;
> > +	bvec = kcalloc(max_vecs, sizeof(struct bio_vec), GFP_KERNEL);
> > +	if (!bvec)
> > +		return -ENOMEM;
> 
> Not sure allocating memory here is a good idea. From what I can tell
> this function is used by non-sockmap callers as well..

Alternatively, we could use a fixed-size on-stack array, struct bio_vec
bvec[size], to avoid the memory allocation. Even if "size" turns out to be
insufficient, the unsent portion would be transmitted by the next call to
`__skb_send_sock`.

Here we only merge the data and send it in one go; the other callers of
this function should still be able to send normally.


* Re: [PATCH] skbuff: Improve the sending efficiency of __skb_send_sock
  2025-06-26  7:50   ` Feng Yang
@ 2025-06-26  8:31     ` Paolo Abeni
  0 siblings, 0 replies; 4+ messages in thread
From: Paolo Abeni @ 2025-06-26  8:31 UTC (permalink / raw)
  To: Feng Yang, stfomichev
  Cc: aleksander.lobakin, almasrymina, asml.silence, davem, ebiggers,
	edumazet, horms, kerneljasonxing, kuba, linux-kernel, netdev,
	willemb, yangfeng

On 6/26/25 9:50 AM, Feng Yang wrote:
> On Wed, 25 Jun 2025 11:35:55 -0700, Stanislav Fomichev <stfomichev@gmail.com> wrote:
>> On 06/23, Feng Yang wrote:
>>> From: Feng Yang <yangfeng@kylinos.cn>
>>>
>>> Aggregate skb data into a bvec array and hand it to a single sendmsg call.
>>> When sockmap is used to forward large packets, what previously required
>>> multiple transmissions now needs only one, which significantly improves
>>> performance. For small packets, performance remains comparable to the
>>> original level.
>>>
>>> When using sockmap for forwarding, the average latency for each packet
>>> size (in bytes) after sending 10,000 packets is as follows:
>>> size	old(us)		new(us)
>>> 512	56		55
>>> 1472	58		58
>>> 1600	106		79
>>> 3000	145		108
>>> 5000	182		123
>>>
>>> Signed-off-by: Feng Yang <yangfeng@kylinos.cn>
>>> ---
>>>  net/core/skbuff.c | 112 +++++++++++++++++++++-------------------------
>>>  1 file changed, 52 insertions(+), 60 deletions(-)
>>>
>>> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
>>> index 85fc82f72d26..664443fc9baf 100644
>>> --- a/net/core/skbuff.c
>>> +++ b/net/core/skbuff.c
>>> @@ -3235,84 +3235,75 @@ typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
>>>  static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
>>>  			   int len, sendmsg_func sendmsg, int flags)
>>>  {
>>> -	unsigned int orig_len = len;
>>>  	struct sk_buff *head = skb;
>>>  	unsigned short fragidx;
>>> -	int slen, ret;
>>> +	struct msghdr msg;
>>> +	struct bio_vec *bvec;
>>> +	int max_vecs, ret, slen;
>>> +	int bvec_count = 0;
>>> +	unsigned int copied = 0;
>>>  
>>> -do_frag_list:
>>> -
>>> -	/* Deal with head data */
>>> -	while (offset < skb_headlen(skb) && len) {
>>> -		struct kvec kv;
>>> -		struct msghdr msg;
>>> -
>>> -		slen = min_t(int, len, skb_headlen(skb) - offset);
>>> -		kv.iov_base = skb->data + offset;
>>> -		kv.iov_len = slen;
>>> -		memset(&msg, 0, sizeof(msg));
>>> -		msg.msg_flags = MSG_DONTWAIT | flags;
>>> -
>>> -		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
>>> -		ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
>>> -				      sendmsg_unlocked, sk, &msg);
>>> -		if (ret <= 0)
>>> -			goto error;
>>> +	max_vecs = skb_shinfo(skb)->nr_frags + 1; // +1 for linear data
>>> +	if (skb_has_frag_list(skb)) {
>>> +		struct sk_buff *frag_skb = skb_shinfo(skb)->frag_list;
>>>  
>>> -		offset += ret;
>>> -		len -= ret;
>>> +		while (frag_skb) {
>>> +			max_vecs += skb_shinfo(frag_skb)->nr_frags + 1; // +1 for linear data
>>> +			frag_skb = frag_skb->next;
>>> +		}
>>>  	}
>>>  
>>> -	/* All the data was skb head? */
>>> -	if (!len)
>>> -		goto out;
>>> +	bvec = kcalloc(max_vecs, sizeof(struct bio_vec), GFP_KERNEL);
>>> +	if (!bvec)
>>> +		return -ENOMEM;
>>
>> Not sure allocating memory here is a good idea. From what I can tell
>> this function is used by non-sockmap callers as well..

Adding a per-packet allocation and free is IMHO a no-go for a patch
intended to improve performance.

> Alternatively, we can use struct bio_vec bvec[size] to avoid memory allocation.

If you mean using a fixed-size bio_vec array allocated on the stack, that
could work...

> Even if the "size" is insufficient, the unsent portion will be transmitted in the next call to `__skb_send_sock`.

... but I think this part is not acceptable: callers may/should already
assume that partial transmissions are due to errors.

Instead, I think you should loop, transmitting a batch of up to
bio_vec_size entries on each iteration.
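
A rough sketch of that shape, assuming a fixed on-stack array (names and
the batch size are illustrative; head and frag_list handling are omitted
to keep it short):

	#define SKB_SEND_MAX_BVECS	16	/* illustrative batch size */

	static int skb_send_frags_batched(struct sock *sk, struct sk_buff *skb,
					  sendmsg_func sendmsg, int flags)
	{
		struct bio_vec bv[SKB_SEND_MAX_BVECS];
		int fragidx = 0, sent = 0;

		while (fragidx < skb_shinfo(skb)->nr_frags) {
			struct msghdr msg = {
				.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT |
					     flags,
			};
			size_t batch_len = 0;
			int n = 0, ret;

			/* Fill the on-stack array with up to one batch of frags. */
			while (n < SKB_SEND_MAX_BVECS &&
			       fragidx < skb_shinfo(skb)->nr_frags) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx++];

				bvec_set_page(&bv[n++], skb_frag_page(frag),
					      skb_frag_size(frag),
					      skb_frag_off(frag));
				batch_len += skb_frag_size(frag);
			}

			/* One sendmsg per batch, no per-packet allocation. */
			iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bv, n, batch_len);
			ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
					      sendmsg_unlocked, sk, &msg);
			if (ret <= 0)
				return sent ? sent : ret;
			sent += ret;
			if ((size_t)ret < batch_len)
				break;	/* short send: report partial count */
		}
		return sent;
	}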

Side note: the patch has a few style issues:
- it should not use // for comments
- variable declarations should respect the reverse christmas tree order

and possibly you could use this refactoring to avoid the use of the
backward goto statement.

Thanks,

Paolo


end of thread, other threads:[~2025-06-26  8:31 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
2025-06-23  8:42 [PATCH] skbuff: Improve the sending efficiency of __skb_send_sock Feng Yang
2025-06-25 18:35 ` Stanislav Fomichev
2025-06-26  7:50   ` Feng Yang
2025-06-26  8:31     ` Paolo Abeni
