From: Michael Chan <michael.chan@broadcom.com>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, kuba@kernel.org,
pabeni@redhat.com, pavan.chebbi@broadcom.com,
andrew.gospodarek@broadcom.com, horms@kernel.org,
helgaas@kernel.org, Vikas Gupta <vikas.gupta@broadcom.com>,
Somnath Kotur <somnath.kotur@broadcom.com>
Subject: [PATCH net-next v2 1/9] bnxt_en: add support for storing crash dump into host memory
Date: Fri, 16 Aug 2024 14:28:24 -0700
Message-ID: <20240816212832.185379-2-michael.chan@broadcom.com>
In-Reply-To: <20240816212832.185379-1-michael.chan@broadcom.com>

From: Vikas Gupta <vikas.gupta@broadcom.com>

Newer firmware supports automatic DMA of the crash dump to host memory
when the firmware crashes. If the feature is supported, allocate the
required memory using the existing context memory infrastructure and
communicate the page table containing the DMA addresses to the
firmware.
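
For reference, an illustrative sketch of the new init-time flow (not
part of the patch; simplified from the bnxt_fw_init_one_p2() hunk
below, with the warning messages trimmed):

	/* Reserve host memory for the crash dump when the firmware
	 * advertises DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR, then tell
	 * the firmware where the page table lives.  If the config call
	 * fails, the memory is released again.
	 */
	rc = bnxt_alloc_crash_dump_mem(bp);
	if (!rc) {
		rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
		if (rc)
			bnxt_free_crash_dump_mem(bp);
	}
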
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: Vikas Gupta <vikas.gupta@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 93 +++++++++++++++++++
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 +
.../ethernet/broadcom/bnxt/bnxt_coredump.c | 18 +++-
.../ethernet/broadcom/bnxt/bnxt_coredump.h | 8 ++
4 files changed, 117 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 75e2cfed5769..017eefd78ba4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,6 +69,7 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -8946,6 +8947,82 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
return 0;
}
+#define BNXT_SET_CRASHDUMP_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K; \
+ else \
+ attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K; \
+} while (0)
+
+static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
+{
+ struct hwrm_dbg_crashdump_medium_cfg_input *req;
+ u16 page_attr = 0;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
+ if (rc)
+ return rc;
+
+ BNXT_SET_CRASHDUMP_PAGE_ATTR(page_attr);
+ req->pg_size_lvl = cpu_to_le16(page_attr |
+ bp->fw_crash_mem->ring_mem.depth);
+ req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
+ req->size = cpu_to_le32(bp->fw_crash_len);
+ req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
+ return hwrm_req_send(bp, req);
+}
+
+static void bnxt_free_crash_dump_mem(struct bnxt *bp)
+{
+ if (bp->fw_crash_mem) {
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ kfree(bp->fw_crash_mem);
+ bp->fw_crash_len = 0;
+ bp->fw_crash_mem = NULL;
+ }
+}
+
+static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
+{
+ u32 mem_size = 0;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
+ if (rc)
+ return rc;
+
+ mem_size = round_up(mem_size, 4);
+
+ if (bp->fw_crash_mem && mem_size == bp->fw_crash_len)
+ return 0;
+
+ bnxt_free_crash_dump_mem(bp);
+
+ bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), GFP_KERNEL);
+ if (!bp->fw_crash_mem)
+ return -ENOMEM;
+
+ rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ return rc;
+ }
+
+ bp->fw_crash_len = mem_size;
+
+ return 0;
+}
+
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp;
@@ -13986,6 +14063,19 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ rc = bnxt_alloc_crash_dump_mem(bp);
+ if (rc)
+ netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
+ rc);
+ if (!rc) {
+ rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ netdev_warn(bp->dev,
+ "hwrm crash dump mem failure rc: %d\n", rc);
+ }
+ }
+
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
@@ -15294,6 +15384,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -15933,6 +16024,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -15986,6 +16078,7 @@ static int bnxt_suspend(struct device *device)
bnxt_hwrm_func_drv_unrgtr(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
+ bnxt_free_crash_dump_mem(bp);
rtnl_unlock();
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 62e637c5be31..2fa6572b6b1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2649,6 +2649,9 @@ struct bnxt {
#endif
u32 thermal_threshold_type;
enum board_idx board_idx;
+
+ struct bnxt_ctx_pg_info *fw_crash_mem;
+ u32 fw_crash_len;
};
#define BNXT_NUM_RX_RING_STATS 8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index c06789882036..ebbad9ccab6a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -385,7 +385,7 @@ int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
}
}
-static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
@@ -395,7 +395,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
- !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
+ (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
@@ -403,8 +404,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return rc;
req->fid = cpu_to_le16(0xffff);
- if (dump_type == BNXT_DUMP_CRASH)
- req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
+ else
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -412,7 +417,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
- *dump_len = le32_to_cpu(resp->crashdump_size);
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ *dump_len = BNXT_CRASH_DUMP_LEN;
+ else
+ *dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* Driver adds coredump header and "HWRM_VER_GET response"
* segment additionally to coredump.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index b1a1b2fffb19..a76d5c281413 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -111,7 +111,15 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
+#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
+ DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
#endif
--
2.30.1