From: Jon Kohler <jon@nutanix.com>
To: "Michael S. Tsirkin" <mst@redhat.com>,
	"Jason Wang" <jasowang@redhat.com>,
	"Eugenio Pérez" <eperezma@redhat.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"David S. Miller" <davem@davemloft.net>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"John Fastabend" <john.fastabend@gmail.com>,
	kvm@vger.kernel.org, virtualization@lists.linux.dev,
	netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
	bpf@vger.kernel.org
Cc: Jon Kohler <jon@nutanix.com>
Subject: [PATCH net-next] vhost/net: align variable names with XDP terminology
Date: Wed,  7 May 2025 09:02:05 -0700
Message-ID: <20250507160206.3267692-1-jon@nutanix.com>

Refactor the local variable names in vhost_net_build_xdp() to align
with XDP terminology (hard_start, headroom, data_len, frame_sz),
making the mapping to the XDP helpers obvious, fold the buffer length
computation into SKB_HEAD_ALIGN(), and reorder the declarations into
reverse Christmas tree (longest to shortest) order.

No functional change intended.

Signed-off-by: Jon Kohler <jon@nutanix.com>
---
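Note for reviewers less familiar with the XDP naming: the renamed
variables deliberately mirror the parameters of the core XDP helpers,
so the mapping is one-to-one. As a rough sketch (paraphrased from
xdp_prepare_buff() in include/net/xdp.h, not part of this patch; see
the tree for the exact definition):

	static __always_inline void
	xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
			 int headroom, int data_len, const bool meta_valid)
	{
		unsigned char *data = hard_start + headroom;

		/* hard_start: base of the allocated frame, before headroom */
		xdp->data_hard_start = hard_start;
		/* packet data begins headroom bytes into the frame */
		xdp->data = data;
		xdp->data_end = data + data_len;
		xdp->data_meta = meta_valid ? data : data + 1;
	}

frame_sz, as passed to xdp_init_buff(), covers the headroom, the
packet data and the skb_shared_info tailroom, which is why it is
checked against PAGE_SIZE before the allocation.
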
 drivers/vhost/net.c | 53 ++++++++++++++++++++++-----------------------
 1 file changed, 26 insertions(+), 27 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7cbfc7d718b3..86db8add92eb 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -665,44 +665,43 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	struct vhost_virtqueue *vq = &nvq->vq;
 	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
 					     dev);
+	int copied, headroom, ret, sock_hlen = nvq->sock_hlen;
+	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
 	struct socket *sock = vhost_vq_get_backend(vq);
+	size_t data_len = iov_iter_count(from);
 	struct virtio_net_hdr *gso;
-	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
 	struct tun_xdp_hdr *hdr;
-	size_t len = iov_iter_count(from);
-	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
-	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
-	int sock_hlen = nvq->sock_hlen;
-	void *buf;
-	int copied;
-	int ret;
+	void *hard_start;
+	u32 frame_sz;
 
-	if (unlikely(len < nvq->sock_hlen))
+	if (unlikely(data_len < sock_hlen))
 		return -EFAULT;
 
-	if (SKB_DATA_ALIGN(len + pad) +
-	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
+	headroom = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + sock_hlen +
+				  (vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0));
+
+	frame_sz = SKB_HEAD_ALIGN(headroom + data_len);
+
+	if (frame_sz > PAGE_SIZE)
 		return -ENOSPC;
 
-	buflen += SKB_DATA_ALIGN(len + pad);
-	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
-				    SMP_CACHE_BYTES);
-	if (unlikely(!buf))
+	hard_start = page_frag_alloc_align(&net->pf_cache, frame_sz,
+					   GFP_KERNEL, SMP_CACHE_BYTES);
+	if (unlikely(!hard_start))
 		return -ENOMEM;
 
-	copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+	copied = copy_from_iter(hard_start + offsetof(struct tun_xdp_hdr, gso),
 				sock_hlen, from);
 	if (copied != sock_hlen) {
 		ret = -EFAULT;
 		goto err;
 	}
 
-	hdr = buf;
+	hdr = hard_start;
 	gso = &hdr->gso;
 
 	if (!sock_hlen)
-		memset(buf, 0, pad);
+		memset(hard_start, 0, headroom);
 
 	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
 	    vhost16_to_cpu(vq, gso->csum_start) +
@@ -712,29 +711,29 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 			       vhost16_to_cpu(vq, gso->csum_start) +
 			       vhost16_to_cpu(vq, gso->csum_offset) + 2);
 
-		if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+		if (vhost16_to_cpu(vq, gso->hdr_len) > data_len) {
 			ret = -EINVAL;
 			goto err;
 		}
 	}
 
-	len -= sock_hlen;
-	copied = copy_from_iter(buf + pad, len, from);
-	if (copied != len) {
+	data_len -= sock_hlen;
+	copied = copy_from_iter(hard_start + headroom, data_len, from);
+	if (copied != data_len) {
 		ret = -EFAULT;
 		goto err;
 	}
 
-	xdp_init_buff(xdp, buflen, NULL);
-	xdp_prepare_buff(xdp, buf, pad, len, true);
-	hdr->buflen = buflen;
+	xdp_init_buff(xdp, frame_sz, NULL);
+	xdp_prepare_buff(xdp, hard_start, headroom, data_len, true);
+	hdr->buflen = frame_sz;
 
 	++nvq->batched_xdp;
 
 	return 0;
 
 err:
-	page_frag_free(buf);
+	page_frag_free(hard_start);
 	return ret;
 }
 
-- 
2.43.0

