* [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems.
@ 2026-02-27 10:15 Dipayaan Roy
2026-03-02 14:02 ` Simon Horman
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Dipayaan Roy @ 2026-02-27 10:15 UTC (permalink / raw)
To: kys, haiyangz, wei.liu, decui, andrew+netdev, davem, edumazet,
kuba, pabeni, leon, longli, kotaranov, horms, shradhagupta,
ssengar, ernis, shirazsaleem, linux-hyperv, netdev, linux-kernel,
linux-rdma, dipayanroy
On certain systems configured with 4K PAGE_SIZE, utilizing page_pool
fragments for RX buffers results in a significant throughput regression.
Profiling reveals that this regression correlates with high overhead in the
fragment allocation and reference counting paths on these specific
platforms, rendering the multi-buffer-per-page strategy counterproductive.
To mitigate this, bypass the page_pool fragment path and force a single RX
packet per page allocation when all the following conditions are met:
1. The system is configured with a 4K PAGE_SIZE.
2. A processor-specific quirk is detected via SMBIOS Type 4 data.
This approach restores expected line-rate performance by ensuring
predictable RX refill behavior on affected hardware.
There is no behavioral change for systems using larger page sizes
(16K/64K), or platforms where this processor-specific quirk does not
apply.
Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
---
.../net/ethernet/microsoft/mana/gdma_main.c | 120 ++++++++++++++++++
drivers/net/ethernet/microsoft/mana/mana_en.c | 23 +++-
include/net/mana/gdma.h | 10 ++
3 files changed, 151 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 0055c231acf6..26bbe736a770 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -9,6 +9,7 @@
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
+#include <linux/dmi.h>
#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
@@ -1955,6 +1956,115 @@ static bool mana_is_pf(unsigned short dev_id)
return dev_id == MANA_PF_DEVICE_ID;
}
+/*
+ * Table for Processor Version strings found from SMBIOS Type 4 information,
+ * for processors that need to force the single RX buffer per page quirk
+ * to meet line rate performance with ARM64 + 4K pages.
+ * Note: These strings are exactly matched with version fetched from SMBIOS.
+ */
+static const char * const mana_single_rxbuf_per_page_quirk_tbl[] = {
+ "Cobalt 200",
+};
+
+static const char *smbios_get_string(const struct dmi_header *hdr, u8 idx)
+{
+ const u8 *start, *end;
+ u8 i;
+
+ /* Indexing starts from 1. */
+ if (!idx)
+ return NULL;
+
+ start = (const u8 *)hdr + hdr->length;
+ end = start + SMBIOS_STR_AREA_MAX;
+
+ for (i = 1; i < idx; i++) {
+ while (start < end && *start)
+ start++;
+ if (start < end)
+ start++;
+ if (start + 1 < end && start[0] == 0 && start[1] == 0)
+ return NULL;
+ }
+
+ if (start >= end || *start == 0)
+ return NULL;
+
+ return (const char *)start;
+}
+
+/* On some systems with 4K PAGE_SIZE, page_pool RX fragments can
+ * trigger a throughput regression. Hence identify those processors
+ * from the extracted SMBIOS table and apply the quirk to force one
+ * RX buffer per page to avoid the fragment allocation/refcounting
+ * overhead in the RX refill path for those processors only.
+ */
+static bool mana_needs_single_rxbuf_per_page(struct gdma_context *gc)
+{
+ int i = 0;
+ const char *ver = gc->processor_version;
+
+ if (!ver)
+ return false;
+
+ if (PAGE_SIZE != SZ_4K)
+ return false;
+
+ while (i < ARRAY_SIZE(mana_single_rxbuf_per_page_quirk_tbl)) {
+ if (!strcmp(ver, mana_single_rxbuf_per_page_quirk_tbl[i]))
+ return true;
+ i++;
+ }
+
+ return false;
+}
+
+static void mana_get_proc_ver_from_smbios(const struct dmi_header *hdr,
+ void *data)
+{
+ struct gdma_context *gc = data;
+ const char *ver_str;
+ u8 idx;
+
+ /* We are only looking for Type 4: Processor Information */
+ if (hdr->type != SMBIOS_TYPE_4_PROCESSOR_INFO)
+ return;
+
+ /* Ensure the record is long enough to contain the Processor Version
+ * field
+ */
+ if (hdr->length <= SMBIOS_TYPE4_PROC_VERSION_OFFSET)
+ return;
+
+ /* The 'Processor Version' string is located at index pointed by
+ * SMBIOS_TYPE4_PROC_VERSION_OFFSET. If found make a copy of it.
+ * There could be multiple Type 4 tables so read and copy the
+ * processor version found the first time.
+ */
+ idx = ((const u8 *)hdr)[SMBIOS_TYPE4_PROC_VERSION_OFFSET];
+ ver_str = smbios_get_string(hdr, idx);
+ if (ver_str && !gc->processor_version)
+ gc->processor_version = kstrdup(ver_str, GFP_KERNEL);
+}
+
+/* Check and initialize all processor optimizations/quirks here */
+static bool mana_init_processor_optimization(struct gdma_context *gc)
+{
+ bool opt_initialized = false;
+
+ gc->processor_version = NULL;
+ dmi_walk(mana_get_proc_ver_from_smbios, gc);
+ if (!gc->processor_version)
+ return false;
+
+ if (mana_needs_single_rxbuf_per_page(gc)) {
+ gc->force_full_page_rx_buffer = true;
+ opt_initialized = true;
+ }
+
+ return opt_initialized;
+}
+
static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct gdma_context *gc;
@@ -2009,6 +2119,11 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
mana_debugfs_root);
+ if (mana_init_processor_optimization(gc))
+ dev_info(&pdev->dev,
+ "Processor specific optimization initialized on: %s\n",
+ gc->processor_version);
+
err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
@@ -2051,6 +2166,8 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
+ kfree(gc->processor_version);
+ gc->processor_version = NULL;
vfree(gc);
release_region:
pci_release_regions(pdev);
@@ -2106,6 +2223,9 @@ static void mana_gd_remove(struct pci_dev *pdev)
pci_iounmap(pdev, gc->bar0_va);
+ kfree(gc->processor_version);
+ gc->processor_version = NULL;
+
vfree(gc);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 91c418097284..a53a8921050b 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -748,6 +748,26 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
return va;
}
+static inline bool
+mana_use_single_rxbuf_per_page(struct mana_port_context *apc, u32 mtu)
+{
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+
+ /* On some systems with 4K PAGE_SIZE, page_pool RX fragments can
+ * trigger a throughput regression. Hence force one RX buffer per page
+ * to avoid the fragment allocation/refcounting overhead in the RX
+ * refill path for those processors only.
+ */
+ if (gc->force_full_page_rx_buffer)
+ return true;
+
+ /* For xdp and jumbo frames make sure only one packet fits per page. */
+ if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc))
+ return true;
+
+ return false;
+}
+
/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
int mtu, u32 *datasize, u32 *alloc_size,
@@ -758,8 +778,7 @@ static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
/* Calculate datasize first (consistent across all cases) */
*datasize = mtu + ETH_HLEN;
- /* For xdp and jumbo frames make sure only one packet fits per page */
- if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
+ if (mana_use_single_rxbuf_per_page(apc, mtu)) {
if (mana_xdp_get(apc)) {
*headroom = XDP_PACKET_HEADROOM;
*alloc_size = PAGE_SIZE;
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index a59bd4035a99..0ef2d6ac5203 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -9,6 +9,14 @@
#include "shm_channel.h"
+#define SMBIOS_STR_AREA_MAX 4096
+
+/* SMBIOS Type 4: Processor Information table */
+#define SMBIOS_TYPE_4_PROCESSOR_INFO 4
+
+/* Byte offset containing the Processor Version string number. */
+#define SMBIOS_TYPE4_PROC_VERSION_OFFSET 0x10
+
#define GDMA_STATUS_MORE_ENTRIES 0x00000105
#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff
@@ -436,6 +444,8 @@ struct gdma_context {
struct workqueue_struct *service_wq;
unsigned long flags;
+ u8 *processor_version;
+ bool force_full_page_rx_buffer;
};
static inline bool mana_gd_is_mana(struct gdma_dev *gd)
--
2.43.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems.
2026-02-27 10:15 [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems Dipayaan Roy
@ 2026-03-02 14:02 ` Simon Horman
2026-03-02 16:38 ` Haiyang Zhang
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Simon Horman @ 2026-03-02 14:02 UTC (permalink / raw)
To: Dipayaan Roy
Cc: kys, haiyangz, wei.liu, decui, andrew+netdev, davem, edumazet,
kuba, pabeni, leon, longli, kotaranov, shradhagupta, ssengar,
ernis, shirazsaleem, linux-hyperv, netdev, linux-kernel,
linux-rdma, dipayanroy
On Fri, Feb 27, 2026 at 02:15:12AM -0800, Dipayaan Roy wrote:
> On certain systems configured with 4K PAGE_SIZE, utilizing page_pool
> fragments for RX buffers results in a significant throughput regression.
> Profiling reveals that this regression correlates with high overhead in the
> fragment allocation and reference counting paths on these specific
> platforms, rendering the multi-buffer-per-page strategy counterproductive.
>
> To mitigate this, bypass the page_pool fragment path and force a single RX
> packet per page allocation when all the following conditions are met:
> 1. The system is configured with a 4K PAGE_SIZE.
> 2. A processor-specific quirk is detected via SMBIOS Type 4 data.
>
> This approach restores expected line-rate performance by ensuring
> predictable RX refill behavior on affected hardware.
>
> There is no behavioral change for systems using larger page sizes
> (16K/64K), or platforms where this processor-specific quirk do not
> apply.
>
> Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
Reviewed-by: Simon Horman <horms@kernel.org>
^ permalink raw reply [flat|nested] 7+ messages in thread
* RE: [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems.
2026-02-27 10:15 [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems Dipayaan Roy
2026-03-02 14:02 ` Simon Horman
@ 2026-03-02 16:38 ` Haiyang Zhang
2026-03-03 10:56 ` Paolo Abeni
2026-03-03 11:56 ` Paolo Abeni
3 siblings, 0 replies; 7+ messages in thread
From: Haiyang Zhang @ 2026-03-02 16:38 UTC (permalink / raw)
To: Dipayaan Roy, KY Srinivasan, wei.liu@kernel.org, Dexuan Cui,
andrew+netdev@lunn.ch, davem@davemloft.net, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, leon@kernel.org, Long Li,
Konstantin Taranov, horms@kernel.org,
shradhagupta@linux.microsoft.com, ssengar@linux.microsoft.com,
ernis@linux.microsoft.com, Shiraz Saleem,
linux-hyperv@vger.kernel.org, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-rdma@vger.kernel.org,
Dipayaan Roy
> -----Original Message-----
> From: Dipayaan Roy <dipayanroy@linux.microsoft.com>
> Sent: Friday, February 27, 2026 5:15 AM
> To: KY Srinivasan <kys@microsoft.com>; Haiyang Zhang
> <haiyangz@microsoft.com>; wei.liu@kernel.org; Dexuan Cui
> <DECUI@microsoft.com>; andrew+netdev@lunn.ch; davem@davemloft.net;
> edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; leon@kernel.org;
> Long Li <longli@microsoft.com>; Konstantin Taranov
> <kotaranov@microsoft.com>; horms@kernel.org;
> shradhagupta@linux.microsoft.com; ssengar@linux.microsoft.com;
> ernis@linux.microsoft.com; Shiraz Saleem <shirazsaleem@microsoft.com>;
> linux-hyperv@vger.kernel.org; netdev@vger.kernel.org; linux-
> kernel@vger.kernel.org; linux-rdma@vger.kernel.org; Dipayaan Roy
> <dipayanroy@microsoft.com>
> Subject: [PATCH net-next] net: mana: Force full-page RX buffers for 4K
> page size on specific systems.
>
> On certain systems configured with 4K PAGE_SIZE, utilizing page_pool
> fragments for RX buffers results in a significant throughput regression.
> Profiling reveals that this regression correlates with high overhead in
> the
> fragment allocation and reference counting paths on these specific
> platforms, rendering the multi-buffer-per-page strategy counterproductive.
>
> To mitigate this, bypass the page_pool fragment path and force a single RX
> packet per page allocation when all the following conditions are met:
> 1. The system is configured with a 4K PAGE_SIZE.
> 2. A processor-specific quirk is detected via SMBIOS Type 4 data.
>
> This approach restores expected line-rate performance by ensuring
> predictable RX refill behavior on affected hardware.
>
> There is no behavioral change for systems using larger page sizes
> (16K/64K), or platforms where this processor-specific quirk do not
> apply.
>
> Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Thanks.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems.
2026-02-27 10:15 [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems Dipayaan Roy
2026-03-02 14:02 ` Simon Horman
2026-03-02 16:38 ` Haiyang Zhang
@ 2026-03-03 10:56 ` Paolo Abeni
2026-03-06 13:12 ` Dipayaan Roy
2026-03-03 11:56 ` Paolo Abeni
3 siblings, 1 reply; 7+ messages in thread
From: Paolo Abeni @ 2026-03-03 10:56 UTC (permalink / raw)
To: Dipayaan Roy, kys, haiyangz, wei.liu, decui, andrew+netdev, davem,
edumazet, kuba, leon, longli, kotaranov, horms, shradhagupta,
ssengar, ernis, shirazsaleem, linux-hyperv, netdev, linux-kernel,
linux-rdma, dipayanroy
On 2/27/26 11:15 AM, Dipayaan Roy wrote:
> On certain systems configured with 4K PAGE_SIZE, utilizing page_pool
> fragments for RX buffers results in a significant throughput regression.
> Profiling reveals that this regression correlates with high overhead in the
> fragment allocation and reference counting paths on these specific
> platforms, rendering the multi-buffer-per-page strategy counterproductive.
>
> To mitigate this, bypass the page_pool fragment path and force a single RX
> packet per page allocation when all the following conditions are met:
> 1. The system is configured with a 4K PAGE_SIZE.
> 2. A processor-specific quirk is detected via SMBIOS Type 4 data.
>
> This approach restores expected line-rate performance by ensuring
> predictable RX refill behavior on affected hardware.
>
> There is no behavioral change for systems using larger page sizes
> (16K/64K), or platforms where this processor-specific quirk do not
> apply.
>
> Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
> ---
> .../net/ethernet/microsoft/mana/gdma_main.c | 120 ++++++++++++++++++
> drivers/net/ethernet/microsoft/mana/mana_en.c | 23 +++-
> include/net/mana/gdma.h | 10 ++
> 3 files changed, 151 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> index 0055c231acf6..26bbe736a770 100644
> --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
> +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> @@ -9,6 +9,7 @@
> #include <linux/msi.h>
> #include <linux/irqdomain.h>
> #include <linux/export.h>
> +#include <linux/dmi.h>
>
> #include <net/mana/mana.h>
> #include <net/mana/hw_channel.h>
> @@ -1955,6 +1956,115 @@ static bool mana_is_pf(unsigned short dev_id)
> return dev_id == MANA_PF_DEVICE_ID;
> }
>
> +/*
> + * Table for Processor Version strings found from SMBIOS Type 4 information,
> + * for processors that needs to force single RX buffer per page quirk for
> + * meeting line rate performance with ARM64 + 4K pages.
> + * Note: These strings are exactly matched with version fetched from SMBIOS.
> + */
> +static const char * const mana_single_rxbuf_per_page_quirk_tbl[] = {
> + "Cobalt 200",
> +};
> +
> +static const char *smbios_get_string(const struct dmi_header *hdr, u8 idx)
> +{
> + const u8 *start, *end;
> + u8 i;
> +
> + /* Indexing starts from 1. */
> + if (!idx)
> + return NULL;
> +
> + start = (const u8 *)hdr + hdr->length;
> + end = start + SMBIOS_STR_AREA_MAX;
> +
> + for (i = 1; i < idx; i++) {
> + while (start < end && *start)
> + start++;
> + if (start < end)
> + start++;
> + if (start + 1 < end && start[0] == 0 && start[1] == 0)
> + return NULL;
> + }
> +
> + if (start >= end || *start == 0)
> + return NULL;
> +
> + return (const char *)start;
If I read correctly, the above sort of duplicate dmi_decode_table().
I think you are better of:
- use the mana_get_proc_ver_from_smbios() decoder to store the
SMBIOS_TYPE4_PROC_VERSION_OFFSET index into gd
- do a 2nd walk with a different decoder to fetch the string at the
specified index.
/P
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems.
2026-02-27 10:15 [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems Dipayaan Roy
` (2 preceding siblings ...)
2026-03-03 10:56 ` Paolo Abeni
@ 2026-03-03 11:56 ` Paolo Abeni
2026-03-06 13:25 ` Dipayaan Roy
3 siblings, 1 reply; 7+ messages in thread
From: Paolo Abeni @ 2026-03-03 11:56 UTC (permalink / raw)
To: Dipayaan Roy, kys, haiyangz, wei.liu, decui, andrew+netdev, davem,
edumazet, kuba, leon, longli, kotaranov, horms, shradhagupta,
ssengar, ernis, shirazsaleem, linux-hyperv, netdev, linux-kernel,
linux-rdma, dipayanroy
On 2/27/26 11:15 AM, Dipayaan Roy wrote:
> diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
> index 91c418097284..a53a8921050b 100644
> --- a/drivers/net/ethernet/microsoft/mana/mana_en.c
> +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
> @@ -748,6 +748,26 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
> return va;
> }
>
> +static inline bool
> +mana_use_single_rxbuf_per_page(struct mana_port_context *apc, u32 mtu)
> +{
I almost forgot: please avoid the 'inline' keyword in .c files. This is
function used only once, should be inlined by the compiler anyway.
> + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
> +
> + /* On some systems with 4K PAGE_SIZE, page_pool RX fragments can
> + * trigger a throughput regression. Hence forces one RX buffer per page
> + * to avoid the fragment allocation/refcounting overhead in the RX
> + * refill path for those processors only.
> + */
> + if (gc->force_full_page_rx_buffer)
> + return true;
Side note: since you could keep the above flag up2date according to the
current mtu and xdp configuration and just test it in the data path.
/P
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems.
2026-03-03 10:56 ` Paolo Abeni
@ 2026-03-06 13:12 ` Dipayaan Roy
0 siblings, 0 replies; 7+ messages in thread
From: Dipayaan Roy @ 2026-03-06 13:12 UTC (permalink / raw)
To: Paolo Abeni
Cc: kys, haiyangz, wei.liu, decui, andrew+netdev, davem, edumazet,
kuba, leon, longli, kotaranov, horms, shradhagupta, ssengar,
ernis, shirazsaleem, linux-hyperv, netdev, linux-kernel,
linux-rdma, dipayanroy
On Tue, Mar 03, 2026 at 11:56:29AM +0100, Paolo Abeni wrote:
> On 2/27/26 11:15 AM, Dipayaan Roy wrote:
> > On certain systems configured with 4K PAGE_SIZE, utilizing page_pool
> > fragments for RX buffers results in a significant throughput regression.
> > Profiling reveals that this regression correlates with high overhead in the
> > fragment allocation and reference counting paths on these specific
> > platforms, rendering the multi-buffer-per-page strategy counterproductive.
> >
> > To mitigate this, bypass the page_pool fragment path and force a single RX
> > packet per page allocation when all the following conditions are met:
> > 1. The system is configured with a 4K PAGE_SIZE.
> > 2. A processor-specific quirk is detected via SMBIOS Type 4 data.
> >
> > This approach restores expected line-rate performance by ensuring
> > predictable RX refill behavior on affected hardware.
> >
> > There is no behavioral change for systems using larger page sizes
> > (16K/64K), or platforms where this processor-specific quirk do not
> > apply.
> >
> > Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
> > ---
> > .../net/ethernet/microsoft/mana/gdma_main.c | 120 ++++++++++++++++++
> > drivers/net/ethernet/microsoft/mana/mana_en.c | 23 +++-
> > include/net/mana/gdma.h | 10 ++
> > 3 files changed, 151 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> > index 0055c231acf6..26bbe736a770 100644
> > --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
> > +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> > @@ -9,6 +9,7 @@
> > #include <linux/msi.h>
> > #include <linux/irqdomain.h>
> > #include <linux/export.h>
> > +#include <linux/dmi.h>
> >
> > #include <net/mana/mana.h>
> > #include <net/mana/hw_channel.h>
> > @@ -1955,6 +1956,115 @@ static bool mana_is_pf(unsigned short dev_id)
> > return dev_id == MANA_PF_DEVICE_ID;
> > }
> >
> > +/*
> > + * Table for Processor Version strings found from SMBIOS Type 4 information,
> > + * for processors that needs to force single RX buffer per page quirk for
> > + * meeting line rate performance with ARM64 + 4K pages.
> > + * Note: These strings are exactly matched with version fetched from SMBIOS.
> > + */
> > +static const char * const mana_single_rxbuf_per_page_quirk_tbl[] = {
> > + "Cobalt 200",
> > +};
> > +
> > +static const char *smbios_get_string(const struct dmi_header *hdr, u8 idx)
> > +{
> > + const u8 *start, *end;
> > + u8 i;
> > +
> > + /* Indexing starts from 1. */
> > + if (!idx)
> > + return NULL;
> > +
> > + start = (const u8 *)hdr + hdr->length;
> > + end = start + SMBIOS_STR_AREA_MAX;
> > +
> > + for (i = 1; i < idx; i++) {
> > + while (start < end && *start)
> > + start++;
> > + if (start < end)
> > + start++;
> > + if (start + 1 < end && start[0] == 0 && start[1] == 0)
> > + return NULL;
> > + }
> > +
> > + if (start >= end || *start == 0)
> > + return NULL;
> > +
> > + return (const char *)start;
>
> If I read correctly, the above sort of duplicate dmi_decode_table().
>
Yes, it's not exported.
> I think you are better of:
> - use the mana_get_proc_ver_from_smbios() decoder to store the
> SMBIOS_TYPE4_PROC_VERSION_OFFSET index into gd
> - do a 2nd walk with a different decoder to fetch the string at the
> specified index.
Sure, will implement the 2nd walk for fetching string in v2.
>
> /P
Thank you, Paolo, for the comments, and apologies for my delay in responding, as I am on-call this week.
I will send out v2 with the changes suggested.
Regards
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems.
2026-03-03 11:56 ` Paolo Abeni
@ 2026-03-06 13:25 ` Dipayaan Roy
0 siblings, 0 replies; 7+ messages in thread
From: Dipayaan Roy @ 2026-03-06 13:25 UTC (permalink / raw)
To: Paolo Abeni
Cc: kys, haiyangz, wei.liu, decui, andrew+netdev, davem, edumazet,
kuba, leon, longli, kotaranov, horms, shradhagupta, ssengar,
ernis, shirazsaleem, linux-hyperv, netdev, linux-kernel,
linux-rdma, dipayanroy
On Tue, Mar 03, 2026 at 12:56:35PM +0100, Paolo Abeni wrote:
> On 2/27/26 11:15 AM, Dipayaan Roy wrote:
> > diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
> > index 91c418097284..a53a8921050b 100644
> > --- a/drivers/net/ethernet/microsoft/mana/mana_en.c
> > +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
> > @@ -748,6 +748,26 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
> > return va;
> > }
> >
> > +static inline bool
> > +mana_use_single_rxbuf_per_page(struct mana_port_context *apc, u32 mtu)
> > +{
>
> I almost forgot: please avoid the 'inline' keyword in .c files. This is
> function used only once, should be inlined by the compiler anyway.
>
Ack, will remove it in v2.
> > + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
> > +
> > + /* On some systems with 4K PAGE_SIZE, page_pool RX fragments can
> > + * trigger a throughput regression. Hence forces one RX buffer per page
> > + * to avoid the fragment allocation/refcounting overhead in the RX
> > + * refill path for those processors only.
> > + */
> > + if (gc->force_full_page_rx_buffer)
> > + return true;
>
> Side note: since you could keep the above flag up2date according to the
> current mtu and xdp configuration and just test it in the data path.
>
If not an issue, would like to keep it this way for better readability.
> /P
>
Regards
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2026-03-06 13:25 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-02-27 10:15 [PATCH net-next] net: mana: Force full-page RX buffers for 4K page size on specific systems Dipayaan Roy
2026-03-02 14:02 ` Simon Horman
2026-03-02 16:38 ` Haiyang Zhang
2026-03-03 10:56 ` Paolo Abeni
2026-03-06 13:12 ` Dipayaan Roy
2026-03-03 11:56 ` Paolo Abeni
2026-03-06 13:25 ` Dipayaan Roy
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox