public inbox for linux-nfs@vger.kernel.org
 help / color / mirror / Atom feed
From: Christoph Hellwig <hch@lst.de>
To: Chuck Lever <chuck.lever@oracle.com>,
	Jeff Layton <jlayton@kernel.org>,
	Trond Myklebust <trondmy@kernel.org>,
	Anna Schumaker <anna@kernel.org>
Cc: NeilBrown <neil@brown.name>,
	Olga Kornievskaia <okorniev@redhat.com>,
	Dai Ngo <Dai.Ngo@oracle.com>, Tom Talpey <tom@talpey.com>,
	linux-nfs@vger.kernel.org
Subject: [PATCH 2/3] sunrpc: simplify xdr_partial_copy_from_skb
Date: Thu, 15 May 2025 13:48:46 +0200	[thread overview]
Message-ID: <20250515114906.32559-3-hch@lst.de> (raw)
In-Reply-To: <20250515114906.32559-1-hch@lst.de>

csum_partial_copy_to_xdr handles both a checksumming and a non-checksumming
case, dispatching between them through a callback, which leads to a lot of
boilerplate code and an indirect call in the fast path.

Switch to storing a need_checksum flag in struct xdr_skb_reader instead
to remove the indirect call and simplify the code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 net/sunrpc/socklib.c | 163 ++++++++++++++++---------------------------
 1 file changed, 59 insertions(+), 104 deletions(-)

diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 1b2b84feeec6..75e31e3a3ced 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -27,97 +27,60 @@
 struct xdr_skb_reader {
 	struct sk_buff	*skb;
 	unsigned int	offset;
+	bool		need_checksum;
 	size_t		count;
 	__wsum		csum;
 };
 
-typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
-				     size_t len);
-
 /**
  * xdr_skb_read_bits - copy some data bits from skb to internal buffer
  * @desc: sk_buff copy helper
  * @to: copy destination
  * @len: number of bytes to copy
  *
- * Possibly called several times to iterate over an sk_buff and copy
- * data out of it.
+ * Possibly called several times to iterate over an sk_buff and copy data out of
+ * it.
  */
 static size_t
 xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
 {
-	if (len > desc->count)
-		len = desc->count;
-	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
-		return 0;
-	desc->count -= len;
-	desc->offset += len;
-	return len;
-}
+	len = min(len, desc->count);
+
+	if (desc->need_checksum) {
+		__wsum csum;
+
+		csum = skb_copy_and_csum_bits(desc->skb, desc->offset, to, len);
+		desc->csum = csum_block_add(desc->csum, csum, desc->offset);
+	} else {
+		if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
+			return 0;
+	}
 
-/**
- * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
- * @desc: sk_buff copy helper
- * @to: copy destination
- * @len: number of bytes to copy
- *
- * Same as skb_read_bits, but calculate a checksum at the same time.
- */
-static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
-{
-	unsigned int pos;
-	__wsum csum2;
-
-	if (len > desc->count)
-		len = desc->count;
-	pos = desc->offset;
-	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len);
-	desc->csum = csum_block_add(desc->csum, csum2, pos);
 	desc->count -= len;
 	desc->offset += len;
 	return len;
 }
 
-/**
- * xdr_partial_copy_from_skb - copy data out of an skb
- * @xdr: target XDR buffer
- * @base: starting offset
- * @desc: sk_buff copy helper
- * @copy_actor: virtual method for copying data
- *
- */
 static ssize_t
-xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
+xdr_partial_copy_from_skb(struct xdr_buf *xdr, struct xdr_skb_reader *desc)
 {
-	struct page	**ppage = xdr->pages;
-	unsigned int	len, pglen = xdr->page_len;
-	ssize_t		copied = 0;
-	size_t		ret;
-
-	len = xdr->head[0].iov_len;
-	if (base < len) {
-		len -= base;
-		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
-		copied += ret;
-		if (ret != len || !desc->count)
-			goto out;
-		base = 0;
-	} else
-		base -= len;
-
-	if (unlikely(pglen == 0))
-		goto copy_tail;
-	if (unlikely(base >= pglen)) {
-		base -= pglen;
-		goto copy_tail;
-	}
-	if (base || xdr->page_base) {
-		pglen -= base;
-		base += xdr->page_base;
-		ppage += base >> PAGE_SHIFT;
-		base &= ~PAGE_MASK;
-	}
-	do {
+	struct page **ppage = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+	unsigned int poff = xdr->page_base & ~PAGE_MASK;
+	unsigned int pglen = xdr->page_len;
+	ssize_t copied = 0;
+	size_t ret;
+
+	if (xdr->head[0].iov_len == 0)
+		return 0;
+
+	ret = xdr_skb_read_bits(desc, xdr->head[0].iov_base,
+			xdr->head[0].iov_len);
+	if (ret != xdr->head[0].iov_len || !desc->count)
+		return ret;
+	copied += ret;
+
+	while (pglen) {
+		unsigned int len = min(PAGE_SIZE - poff, pglen);
 		char *kaddr;
 
 		/* ACL likes to be lazy in allocating pages - ACLs
@@ -126,36 +89,29 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb
 			*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
 			if (unlikely(*ppage == NULL)) {
 				if (copied == 0)
-					copied = -ENOMEM;
-				goto out;
+					return -ENOMEM;
+				return copied;
 			}
 		}
 
-		len = PAGE_SIZE;
 		kaddr = kmap_atomic(*ppage);
-		if (base) {
-			len -= base;
-			if (pglen < len)
-				len = pglen;
-			ret = copy_actor(desc, kaddr + base, len);
-			base = 0;
-		} else {
-			if (pglen < len)
-				len = pglen;
-			ret = copy_actor(desc, kaddr, len);
-		}
+		ret = xdr_skb_read_bits(desc, kaddr + poff, len);
 		flush_dcache_page(*ppage);
 		kunmap_atomic(kaddr);
+
 		copied += ret;
 		if (ret != len || !desc->count)
-			goto out;
+			return copied;
 		ppage++;
-	} while ((pglen -= len) != 0);
-copy_tail:
-	len = xdr->tail[0].iov_len;
-	if (base < len)
-		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
-out:
+		pglen -= len;
+		poff = 0;
+	}
+
+	if (xdr->tail[0].iov_len) {
+		copied += xdr_skb_read_bits(desc, xdr->tail[0].iov_base,
+					xdr->tail[0].iov_len);
+	}
+
 	return copied;
 }
 
@@ -169,17 +125,22 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb
  */
 int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 {
-	struct xdr_skb_reader	desc;
-
-	desc.skb = skb;
-	desc.offset = 0;
-	desc.count = skb->len - desc.offset;
+	struct xdr_skb_reader desc = {
+		.skb		= skb,
+		.count		= skb->len,
+	};
 
-	if (skb_csum_unnecessary(skb))
-		goto no_checksum;
+	if (skb_csum_unnecessary(skb)) {
+		if (xdr_partial_copy_from_skb(xdr, &desc) < 0)
+			return -1;
+		if (desc.count)
+			return -1;
+		return 0;
+	}
 
+	desc.need_checksum = true;
 	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
+	if (xdr_partial_copy_from_skb(xdr, &desc) < 0)
 		return -1;
 	if (desc.offset != skb->len) {
 		__wsum csum2;
@@ -194,12 +155,6 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 	    !skb->csum_complete_sw)
 		netdev_rx_csum_fault(skb->dev, skb);
 	return 0;
-no_checksum:
-	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
-		return -1;
-	if (desc.count)
-		return -1;
-	return 0;
 }
 EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
 
-- 
2.47.2


  parent reply	other threads:[~2025-05-15 11:49 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-05-15 11:48 small sunrpc cleanups v2 Christoph Hellwig
2025-05-15 11:48 ` [PATCH 1/3] sunrpc: simplify xdr_init_encode_pages Christoph Hellwig
2025-05-15 11:48 ` Christoph Hellwig [this message]
2025-05-15 15:45   ` [PATCH 2/3] sunrpc: simplify xdr_partial_copy_from_skb Jeff Layton
2025-05-15 11:48 ` [PATCH 3/3] sunrpc: unexport csum_partial_copy_to_xdr Christoph Hellwig
2025-05-15 19:03 ` small sunrpc cleanups v2 Chuck Lever
  -- strict thread matches above, loose matches on Subject: below --
2025-05-13  8:57 small sunrpc cleanups Christoph Hellwig
2025-05-13  8:57 ` [PATCH 2/3] sunrpc: simplify xdr_partial_copy_from_skb Christoph Hellwig
2025-05-13 12:15   ` Jeff Layton
2025-05-14  5:05     ` Christoph Hellwig

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250515114906.32559-3-hch@lst.de \
    --to=hch@lst.de \
    --cc=Dai.Ngo@oracle.com \
    --cc=anna@kernel.org \
    --cc=chuck.lever@oracle.com \
    --cc=jlayton@kernel.org \
    --cc=linux-nfs@vger.kernel.org \
    --cc=neil@brown.name \
    --cc=okorniev@redhat.com \
    --cc=tom@talpey.com \
    --cc=trondmy@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox