From: Nick Child <nnac123@linux.ibm.com>
To: netdev@vger.kernel.org
Cc: bjking1@linux.ibm.com, haren@linux.ibm.com, ricklind@us.ibm.com,
Nick Child <nnac123@linux.ibm.com>
Subject: [PATCH net-next 3/7] ibmvnic: Reduce memcpys in tx descriptor generation
Date: Thu, 1 Aug 2024 16:23:36 -0500
Message-ID: <20240801212340.132607-4-nnac123@linux.ibm.com>
In-Reply-To: <20240801212340.132607-1-nnac123@linux.ibm.com>

Previously, when creating the header descriptors, the driver would:
1. allocate a temporary buffer on the stack (in build_hdr_descs_arr)
2. memcpy the header info into the temporary buffer (in build_hdr_data)
3. memcpy the temp buffer into a local variable (in create_hdr_descs)
4. copy the local variable into the return buffer (in create_hdr_descs)
Since there is no opportunity for errors during this process, the
temporary buffer is not needed and the work can be done directly on the
return buffer.
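
As a minimal, self-contained illustration of the redundant staging
(hypothetical helper names, not the driver's code), the old path is
equivalent to copying through a scratch buffer where a single memcpy()
into the destination would do:

#include <string.h>

/* Old shape: stage the data in a stack buffer, then copy it again. */
static void copy_via_temp(unsigned char *dst, const unsigned char *src,
			  size_t len)
{
	unsigned char tmp[140];		/* scratch buffer, len assumed <= 140 */

	memcpy(tmp, src, len);		/* copy #1: into the scratch buffer   */
	memcpy(dst, tmp, len);		/* copy #2: into the real destination */
}

/* New shape: nothing can fail in between, so copy straight to dst. */
static void copy_direct(unsigned char *dst, const unsigned char *src,
			size_t len)
{
	memcpy(dst, src, len);
}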
Repurpose build_hdr_data() to only calculate the header lengths and
rename it to get_hdr_lens().

Edit create_hdr_descs() to read the headers from the skb directly and
copy them straight into the output descriptor buffer.

The process now uses less memory and fewer write operations while also
being more readable.
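
For orientation, the per-xmit path in build_hdr_descs_arr() then boils
down to the two calls below (this mirrors the last hunk of the diff;
variable names are taken from the existing function):

	int hdr_len[3] = {0, 0, 0};
	int tot_len;

	/* Step 1: only compute the per-layer header lengths. */
	tot_len = get_hdr_lens(hdr_field, skb, hdr_len);

	/* Step 2: copy headers straight from the skb into the descriptor
	 * array supplied by the xmit path, no intermediate buffer.
	 */
	*num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
					 tot_len, hdr_len, indir_arr + 1);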
Signed-off-by: Nick Child <nnac123@linux.ibm.com>
---
drivers/net/ethernet/ibm/ibmvnic.c | 80 +++++++++++++-----------------
1 file changed, 34 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7d552d4bbe15..4fe2c8c17b05 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2150,46 +2150,38 @@ static int ibmvnic_close(struct net_device *netdev)
* Builds a buffer containing these headers. Saves individual header
* lengths and total buffer length to be used to build descriptors.
*/
-static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
- int *hdr_len, u8 *hdr_data)
+static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len)
{
int len = 0;
- u8 *hdr;
- if (skb->protocol == htons(ETH_P_IP)) {
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- hdr_len[2] = tcp_hdrlen(skb);
- else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
- hdr_len[2] = sizeof(struct udphdr);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- hdr_len[2] = tcp_hdrlen(skb);
- else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
- hdr_len[2] = sizeof(struct udphdr);
- }
-
if ((hdr_field >> 6) & 1) {
hdr_len[0] = skb_mac_header_len(skb);
- hdr = skb_mac_header(skb);
- memcpy(hdr_data, hdr, hdr_len[0]);
len += hdr_len[0];
}
if ((hdr_field >> 5) & 1) {
hdr_len[1] = skb_network_header_len(skb);
- hdr = skb_network_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[1]);
len += hdr_len[1];
}
- if ((hdr_field >> 4) & 1) {
- hdr = skb_transport_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[2]);
- len += hdr_len[2];
+ if (!((hdr_field >> 4) & 1))
+ return len;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
}
- return len;
+ return len + hdr_len[2];
}
/**
@@ -2207,7 +2199,7 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
union sub_crq *scrq_arr)
{
- union sub_crq hdr_desc;
+ union sub_crq *hdr_desc;
int tmp_len = len;
int num_descs = 0;
u8 *data, *cur;
@@ -2216,28 +2208,26 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
while (tmp_len > 0) {
cur = hdr_data + len - tmp_len;
- memset(&hdr_desc, 0, sizeof(hdr_desc));
- if (cur != hdr_data) {
- data = hdr_desc.hdr_ext.data;
+ hdr_desc = &scrq_arr[num_descs];
+ if (num_descs) {
+ data = hdr_desc->hdr_ext.data;
tmp = tmp_len > 29 ? 29 : tmp_len;
- hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
- hdr_desc.hdr_ext.len = tmp;
+ hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc->hdr_ext.len = tmp;
} else {
- data = hdr_desc.hdr.data;
+ data = hdr_desc->hdr.data;
tmp = tmp_len > 24 ? 24 : tmp_len;
- hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
- hdr_desc.hdr.len = tmp;
- hdr_desc.hdr.l2_len = (u8)hdr_len[0];
- hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
- hdr_desc.hdr.l4_len = (u8)hdr_len[2];
- hdr_desc.hdr.flag = hdr_field << 1;
+ hdr_desc->hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc->hdr.len = tmp;
+ hdr_desc->hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc->hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc->hdr.flag = hdr_field << 1;
}
memcpy(data, cur, tmp);
tmp_len -= tmp;
- *scrq_arr = hdr_desc;
- scrq_arr++;
num_descs++;
}
@@ -2260,13 +2250,11 @@ static void build_hdr_descs_arr(struct sk_buff *skb,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
- u8 hdr_data[140] = {0};
int tot_len;
- tot_len = build_hdr_data(hdr_field, skb, hdr_len,
- hdr_data);
- *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
- indir_arr + 1);
+ tot_len = get_hdr_lens(hdr_field, skb, hdr_len);
+ *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
+ tot_len, hdr_len, indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
--
2.43.0