From: "Nikhil P. Rao" <nikhil.rao@amd.com>
To: Brett Creeley <brett.creeley@amd.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Kees Cook <kees@kernel.org>,
"Gustavo A. R. Silva" <gustavoars@kernel.org>
Cc: <netdev@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
<linux-hardening@vger.kernel.org>,
"Nikhil P. Rao" <nikhil.rao@amd.com>, <eric.joyner@amd.com>,
Vamsi Atluri <Vamsi.Atluri@amd.com>
Subject: [PATCH 5/6] pds_core: add host backed memory support for firmware
Date: Wed, 29 Apr 2026 07:58:09 +0000 [thread overview]
Message-ID: <20260429-b4-pldm-b4-v1-5-e43b6c92e46c@amd.com> (raw)
In-Reply-To: <20260429-b4-pldm-b4-v1-0-e43b6c92e46c@amd.com>
From: Vamsi Atluri <Vamsi.Atluri@amd.com>
Some newer AMD/Pensando cards have minimal memory and there are cases
where components, specifically in the control plane, need more memory.
This patch adds support for host backed DMA memory that can be used
by the firmware for the previously mentioned cases.
Assisted-by: Claude:claude-opus-4.6
Signed-off-by: Vamsi Atluri <Vamsi.Atluri@amd.com>
Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
---
drivers/net/ethernet/amd/pds_core/core.c | 166 +++++++++++++++++++++++++++++++
drivers/net/ethernet/amd/pds_core/core.h | 19 ++++
drivers/net/ethernet/amd/pds_core/main.c | 1 +
include/linux/pds/pds_adminq.h | 132 ++++++++++++++++++++++++
include/linux/pds/pds_core_if.h | 2 +
5 files changed, 320 insertions(+)
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 705cab7b0727..e94fea06c6cc 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -487,6 +487,7 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
pdsc->viftype_status = NULL;
}
+ pdsc_host_mem_free(pdsc);
pdsc_dev_uninit(pdsc);
set_bit(PDSC_S_FW_DEAD, &pdsc->state);
@@ -496,6 +497,7 @@ int pdsc_start(struct pdsc *pdsc)
{
pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
PDS_CORE_INTR_MASK_CLEAR);
+ pdsc_host_mem_add(pdsc);
return 0;
}
@@ -658,3 +660,167 @@ void pdsc_health_thread(struct work_struct *work)
out_unlock:
mutex_unlock(&pdsc->config_lock);
}
+
+/* Tell the firmware to drop the host memory region identified by @tag.
+ * @reason carries an enum pds_core_status_code value describing why
+ * (e.g. PDS_RC_ENOMEM when the host could not satisfy the request,
+ * PDS_RC_SUCCESS on orderly teardown).  The post result is ignored:
+ * this runs on error/teardown paths where there is no recovery.
+ */
+static void pdsc_host_mem_del_one(struct pdsc *pdsc, u16 tag, u8 reason)
+{
+	union pds_core_adminq_comp comp = {};
+	union pds_core_adminq_cmd cmd = {
+		.mem_del.opcode = PDS_AQ_CMD_MEM_DEL,
+		.mem_del.tag = cpu_to_le16(tag),
+		.mem_del.reason = reason,
+	};
+
+	dev_dbg(pdsc->dev, "Sending aq cmd for mem del tag %d\n", tag);
+	pdsc_adminq_post(pdsc, &cmd, &comp, false);
+}
+
+/* Query one firmware host-memory request (by @index), allocate and
+ * DMA-map backing pages for it, then hand the buffer to the firmware
+ * with MEM_ADD.  Once MEM_QUERY has succeeded the firmware expects a
+ * matching MEM_ADD or MEM_DEL, so every error path after that point
+ * sends MEM_DEL before cleaning up.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int pdsc_host_mem_add_one(struct pdsc *pdsc, int index)
+{
+	struct pdsc_host_mem *hm = &pdsc->host_mem_reqs[index];
+	union pds_core_adminq_comp comp = {};
+	union pds_core_adminq_cmd cmd = {};
+	int err;
+
+	memset(hm, 0, sizeof(*hm));
+	cmd.mem_query.opcode = PDS_AQ_CMD_MEM_QUERY;
+	/* Tell the firmware which request we are asking about; without
+	 * this every query would go out with index 0
+	 */
+	cmd.mem_query.index = cpu_to_le16(index);
+	dev_dbg(pdsc->dev, "Sending aq cmd for mem query index %d\n", index);
+	err = pdsc_adminq_post(pdsc, &cmd, &comp, false);
+	if (err || comp.status != PDS_RC_SUCCESS) {
+		dev_err(pdsc->dev, "mem query failed err %d status %d\n",
+			err, comp.status);
+		return err ? err : -EIO;
+	}
+	hm->size = le32_to_cpu(comp.mem_query.size);
+	hm->tag = le16_to_cpu(comp.mem_query.tag);
+	dev_dbg(pdsc->dev, "mem query returned size %d tag %d\n",
+		hm->size, hm->tag);
+
+	if (!hm->size || hm->size > PDSC_HOST_MEM_MAX_CONTIG) {
+		dev_err(pdsc->dev, "invalid size %d for tag %d\n",
+			hm->size, hm->tag);
+		err = -EINVAL;
+		goto err_del;
+	}
+
+	/* get_order() rounds up, so the allocation always covers hm->size
+	 * even when the firmware asks for a non-power-of-two size;
+	 * ilog2()-based math would round down and under-allocate
+	 */
+	hm->order = get_order(hm->size);
+	hm->pg = alloc_pages(GFP_KERNEL, hm->order);
+	if (!hm->pg) {
+		dev_err(pdsc->dev, "alloc order %d failed for tag %d\n",
+			hm->order, hm->tag);
+		err = -ENOMEM;
+		goto err_del;
+	}
+
+	hm->pa = dma_map_page(pdsc->dev, hm->pg, 0, hm->size, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(pdsc->dev, hm->pa)) {
+		dev_err(pdsc->dev, "dma map failed for tag %d size %d\n",
+			hm->tag, hm->size);
+		err = -EIO;
+		goto err_del;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&comp, 0, sizeof(comp));
+	cmd.mem_add.opcode = PDS_AQ_CMD_MEM_ADD;
+	cmd.mem_add.tag = cpu_to_le16(hm->tag);
+	cmd.mem_add.size = cpu_to_le32(hm->size);
+	cmd.mem_add.buf_pa = cpu_to_le64(hm->pa);
+
+	dev_dbg(pdsc->dev, "Sending aq cmd for mem add tag %d size %d pa 0x%llx\n",
+		hm->tag, hm->size, hm->pa);
+	err = pdsc_adminq_post(pdsc, &cmd, &comp, false);
+	if (err || comp.status != PDS_RC_SUCCESS) {
+		dev_err(pdsc->dev, "mem add failed err %d status %d for tag %d\n",
+			err, comp.status, hm->tag);
+		err = err ? err : -EIO;
+		goto err_del;
+	}
+	dev_dbg(pdsc->dev, "mem add completed for tag %d\n", hm->tag);
+
+	return 0;
+
+err_del:
+	/* After MEM_QUERY succeeds, firmware expects MEM_ADD or MEM_DEL */
+	pdsc_host_mem_del_one(pdsc, hm->tag, PDS_RC_ENOMEM);
+	if (hm->pg) {
+		if (!dma_mapping_error(pdsc->dev, hm->pa))
+			dma_unmap_page(pdsc->dev, hm->pa, hm->size, DMA_BIDIRECTIONAL);
+		__free_pages(hm->pg, hm->order);
+		hm->pg = NULL;
+	}
+	return err;
+}
+
+/* If the device advertises the HOST_MEM capability, ask it how many
+ * host memory regions it wants (bounded by PDSC_HOST_MEM_MAX_CONTIG
+ * per region) and try to satisfy each one.  Failures are not fatal to
+ * device startup: on a mid-loop error the regions added so far are
+ * kept and num_host_mem_reqs records how many succeeded.
+ */
+void pdsc_host_mem_add(struct pdsc *pdsc)
+{
+	union pds_core_adminq_comp comp = {};
+	union pds_core_adminq_cmd cmd = {};
+	u16 count;
+	int err;
+	int i;
+
+	if (!(pdsc->dev_ident.capabilities & cpu_to_le64(PDS_CORE_DEV_CAP_HOST_MEM)))
+		return;
+
+	cmd.mem_get_count.opcode = PDS_AQ_CMD_MEM_GET_COUNT;
+	cmd.mem_get_count.max_contig = cpu_to_le32(PDSC_HOST_MEM_MAX_CONTIG);
+	dev_dbg(pdsc->dev, "Sending aq cmd for mem get count max_contig %lu\n",
+		PDSC_HOST_MEM_MAX_CONTIG);
+	err = pdsc_adminq_post(pdsc, &cmd, &comp, false);
+	if (err || comp.status != PDS_RC_SUCCESS) {
+		dev_err(pdsc->dev, "mem get count failed err %d status %d\n",
+			err, comp.status);
+		return;
+	}
+
+	count = le16_to_cpu(comp.mem_get_count.count);
+	dev_dbg(pdsc->dev, "mem get count returned count %d\n", count);
+	if (count == 0)
+		return;
+
+	/* no OOM message needed, the allocator already warns on failure */
+	pdsc->host_mem_reqs = kzalloc_objs(*pdsc->host_mem_reqs, count,
+					   GFP_KERNEL);
+	if (!pdsc->host_mem_reqs)
+		return;
+
+	for (i = 0; i < count; i++) {
+		err = pdsc_host_mem_add_one(pdsc, i);
+		if (err)
+			break;
+	}
+
+	pdsc->num_host_mem_reqs = i;
+}
+
+/* Tell the firmware to release every host memory region that was
+ * successfully added.  This only notifies the device; the host-side
+ * pages remain mapped and allocated until pdsc_host_mem_free() runs.
+ */
+void pdsc_host_mem_del(struct pdsc *pdsc)
+{
+	int i;
+
+	if (!pdsc->host_mem_reqs)
+		return;
+
+	for (i = 0; i < pdsc->num_host_mem_reqs; i++)
+		pdsc_host_mem_del_one(pdsc, pdsc->host_mem_reqs[i].tag,
+				      PDS_RC_SUCCESS);
+}
+
+/* Unmap and free the pages backing all added host memory regions and
+ * release the tracking array.
+ *
+ * NOTE(review): this assumes the firmware no longer accesses the
+ * buffers (MEM_DEL already sent, or FW declared dead) -- confirm that
+ * all callers guarantee this before the unmap happens.
+ */
+void pdsc_host_mem_free(struct pdsc *pdsc)
+{
+	int i;
+
+	if (!pdsc->host_mem_reqs)
+		return;
+
+	for (i = 0; i < pdsc->num_host_mem_reqs; i++) {
+		dma_unmap_page(pdsc->dev, pdsc->host_mem_reqs[i].pa,
+			       pdsc->host_mem_reqs[i].size,
+			       DMA_BIDIRECTIONAL);
+		__free_pages(pdsc->host_mem_reqs[i].pg, pdsc->host_mem_reqs[i].order);
+	}
+
+	kfree(pdsc->host_mem_reqs);
+	pdsc->host_mem_reqs = NULL;
+	pdsc->num_host_mem_reqs = 0;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index c9ba63878927..e53edf72a5d5 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -5,6 +5,7 @@
#define _PDSC_H_
#include <linux/debugfs.h>
+#include <linux/mmzone.h>
#include <net/devlink.h>
#include <linux/pds/pds_common.h>
@@ -23,6 +24,8 @@
#define PDSC_SETUP_RECOVERY false
#define PDSC_SETUP_INIT true
+#define PDSC_HOST_MEM_MAX_CONTIG ((PAGE_SIZE) << (MAX_PAGE_ORDER))
+
struct pdsc_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -141,6 +144,14 @@ struct pdsc_viftype {
struct pds_auxiliary_dev *padev;
};
+/* One host-provided DMA region tracked per firmware memory request */
+struct pdsc_host_mem {
+	u32 size;	/* region size in bytes, from the MEM_QUERY completion */
+	u16 tag;	/* firmware's identifier for this request */
+	u8 order;	/* alloc_pages() order of the backing allocation */
+	struct page *pg;	/* backing page allocation */
+	dma_addr_t pa;	/* mapped DMA address handed to firmware via MEM_ADD */
+};
+
/* No state flags set means we are in a steady running state */
enum pdsc_state_flags {
PDSC_S_FW_DEAD, /* stopped, wait on startup or recovery */
@@ -200,6 +211,9 @@ struct pdsc {
struct pdsc_viftype *viftype_status;
struct work_struct pci_reset_work;
+ struct pdsc_host_mem *host_mem_reqs;
+ u16 num_host_mem_reqs;
+
struct pds_core_component_list_info fw_components;
};
@@ -277,6 +291,7 @@ void pdsc_debugfs_add_viftype(struct pdsc *pdsc);
void pdsc_debugfs_add_irqs(struct pdsc *pdsc);
void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq);
void pdsc_debugfs_del_qcq(struct pdsc_qcq *qcq);
+void pdsc_debugfs_add_host_mem(struct pdsc *pdsc);
int pdsc_err_to_errno(enum pds_core_status_code code);
bool pdsc_is_fw_running(struct pdsc *pdsc);
@@ -334,4 +349,8 @@ void pdsc_fw_down(struct pdsc *pdsc);
void pdsc_fw_up(struct pdsc *pdsc);
void pdsc_pci_reset_thread(struct work_struct *work);
+void pdsc_host_mem_add(struct pdsc *pdsc);
+void pdsc_host_mem_del(struct pdsc *pdsc);
+void pdsc_host_mem_free(struct pdsc *pdsc);
+
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index f0d0993f9d91..0a0542bf7cbb 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -437,6 +437,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
timer_shutdown_sync(&pdsc->wdtimer);
+ pdsc_host_mem_del(pdsc);
if (pdsc->wq)
destroy_workqueue(pdsc->wq);
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
index 40ff0ec2b879..ef46415ab9fd 100644
--- a/include/linux/pds/pds_adminq.h
+++ b/include/linux/pds/pds_adminq.h
@@ -34,6 +34,12 @@ enum pds_core_adminq_opcode {
PDS_AQ_CMD_RX_FILTER_ADD = 31,
PDS_AQ_CMD_RX_FILTER_DEL = 32,
+ /* MEM commands */
+ PDS_AQ_CMD_MEM_GET_COUNT = 10,
+ PDS_AQ_CMD_MEM_QUERY = 11,
+ PDS_AQ_CMD_MEM_ADD = 12,
+ PDS_AQ_CMD_MEM_DEL = 13,
+
/* Queue commands */
PDS_AQ_CMD_Q_IDENTIFY = 39,
PDS_AQ_CMD_Q_INIT = 40,
@@ -207,6 +213,122 @@ struct pds_core_client_request_cmd {
u8 client_cmd[60];
};
+/**
+ * struct pds_core_mem_get_count_cmd - MEM_GET_COUNT command
+ * @opcode: opcode PDS_AQ_CMD_MEM_GET_COUNT
+ * @rsvd: Word boundary padding
+ * @max_contig: Largest single contiguous buffer, in bytes, the host
+ *              can provide for any one memory request
+ *
+ * Query the number of host memory requests needed by firmware.
+ */
+struct pds_core_mem_get_count_cmd {
+	u8     opcode;
+	u8     rsvd[3];
+	__le32 max_contig;
+} __packed;
+
+/**
+ * struct pds_core_mem_get_count_comp - MEM_GET_COUNT completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @count: Number of host memory requests, i.e. the number of
+ *         MEM_QUERY/MEM_ADD exchanges the driver should perform
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_mem_get_count_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	__le16 count;
+	u8     rsvd2[9];
+	u8     color;
+} __packed;
+
+/**
+ * struct pds_core_mem_query_cmd - MEM_QUERY command
+ * @opcode: opcode PDS_AQ_CMD_MEM_QUERY
+ * @rsvd: Word boundary padding
+ * @index: Memory request index, 0 .. count-1 where count comes from
+ *         the MEM_GET_COUNT completion
+ */
+struct pds_core_mem_query_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 index;
+} __packed;
+
+/**
+ * struct pds_core_mem_query_comp - MEM_QUERY completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @size: Size of memory request in bytes
+ * @tag: Tag for this memory request, echoed back in the subsequent
+ *       MEM_ADD or MEM_DEL command
+ */
+struct pds_core_mem_query_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	__le32 size;
+	__le16 tag;
+} __packed;
+
+/**
+ * struct pds_core_mem_add_cmd - MEM_ADD command
+ * @opcode: opcode PDS_AQ_CMD_MEM_ADD
+ * @rsvd: Word boundary padding
+ * @tag: Tag for this memory request, from the MEM_QUERY completion
+ * @size: Size of memory in bytes
+ * @buf_pa: DMA address of the host memory buffer
+ */
+struct pds_core_mem_add_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 tag;
+	__le32 size;
+	__le64 buf_pa;
+} __packed;
+
+/**
+ * struct pds_core_mem_add_comp - MEM_ADD command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: padding for natural alignment
+ * @comp_index: Index in the desc ring for which this is the completion
+ */
+struct pds_core_mem_add_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+} __packed;
+
+/**
+ * struct pds_core_mem_del_cmd - MEM_DEL command
+ * @opcode: opcode PDS_AQ_CMD_MEM_DEL
+ * @rsvd: Word boundary padding
+ * @tag: Tag for this memory request
+ * @reason: Reason for deletion (enum pds_core_status_code value,
+ *          e.g. PDS_RC_SUCCESS on teardown, PDS_RC_ENOMEM on
+ *          host allocation failure)
+ */
+struct pds_core_mem_del_cmd {
+	u8     opcode;
+	u8     rsvd;
+	__le16 tag;
+	u8     reason;
+} __packed;
+
+/**
+ * struct pds_core_mem_del_comp - MEM_DEL command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @tag: Tag for the memory request
+ */
+struct pds_core_mem_del_comp {
+	u8     status;
+	u8     rsvd;
+	__le16 comp_index;
+	__le16 tag;
+} __packed;
+
#define PDS_CORE_MAX_FRAGS 16
#define PDS_CORE_QCQ_F_INITED BIT(0)
@@ -1454,6 +1576,11 @@ union pds_core_adminq_cmd {
struct pds_core_client_unreg_cmd client_unreg;
struct pds_core_client_request_cmd client_request;
+ struct pds_core_mem_get_count_cmd mem_get_count;
+ struct pds_core_mem_query_cmd mem_query;
+ struct pds_core_mem_add_cmd mem_add;
+ struct pds_core_mem_del_cmd mem_del;
+
struct pds_core_lif_identify_cmd lif_ident;
struct pds_core_lif_init_cmd lif_init;
struct pds_core_lif_reset_cmd lif_reset;
@@ -1502,6 +1629,11 @@ union pds_core_adminq_comp {
struct pds_core_client_reg_comp client_reg;
+ struct pds_core_mem_get_count_comp mem_get_count;
+ struct pds_core_mem_query_comp mem_query;
+ struct pds_core_mem_add_comp mem_add;
+ struct pds_core_mem_del_comp mem_del;
+
struct pds_core_lif_identify_comp lif_ident;
struct pds_core_lif_init_comp lif_init;
struct pds_core_lif_setattr_comp lif_setattr;
diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h
index b8052985dddf..fb489e8d54ef 100644
--- a/include/linux/pds/pds_core_if.h
+++ b/include/linux/pds/pds_core_if.h
@@ -110,9 +110,11 @@ struct pds_core_drv_identity {
/**
* enum pds_core_dev_capability - Device capabilities
* @PDS_CORE_DEV_CAP_PLDM_FW_UPDATE: Device only supports FW update via PLDM
+ * @PDS_CORE_DEV_CAP_HOST_MEM: Device supports host memory for fw use
*/
enum pds_core_dev_capability {
PDS_CORE_DEV_CAP_PLDM_FW_UPDATE = BIT(0),
+ PDS_CORE_DEV_CAP_HOST_MEM = BIT(1),
};
#define PDS_DEV_TYPE_MAX 16
--
2.43.0
next prev parent reply other threads:[~2026-04-29 7:58 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-29 7:58 [PATCH 0/6] pds_core: Add PLDM firmware update and host backed memory support Nikhil P. Rao
2026-04-29 7:58 ` [PATCH 1/6] pds_core: add support for quiet devcmd failures Nikhil P. Rao
2026-04-29 7:58 ` [PATCH 2/6] pds_core: add support for identity version 2 Nikhil P. Rao
2026-04-29 7:58 ` [PATCH 3/6] pds_core: add PLDM firmware update support via devlink flash Nikhil P. Rao
2026-04-29 7:58 ` [PATCH 4/6] pds_core: add PLDM component info display Nikhil P. Rao
2026-04-29 7:58 ` Nikhil P. Rao [this message]
2026-04-29 7:58 ` [PATCH 6/6] pds_core: add debugfs support for host backed memory Nikhil P. Rao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260429-b4-pldm-b4-v1-5-e43b6c92e46c@amd.com \
--to=nikhil.rao@amd.com \
--cc=Vamsi.Atluri@amd.com \
--cc=andrew+netdev@lunn.ch \
--cc=brett.creeley@amd.com \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=eric.joyner@amd.com \
--cc=gustavoars@kernel.org \
--cc=kees@kernel.org \
--cc=kuba@kernel.org \
--cc=linux-hardening@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox