DPDK-dev Archive on lore.kernel.org
From: Denis Lyulin <lyulin.2003@mail.ru>
To: Kishore Padmanabha <kishore.padmanabha@broadcom.com>,
	Ajit Khaparde <ajit.khaparde@broadcom.com>,
	Somnath Kotur <somnath.kotur@broadcom.com>,
	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Cc: dev@dpdk.org, stable@dpdk.org, Denis Lyulin <lyulin.2003@mail.ru>
Subject: [PATCH 1/1] net/bnxt: fix string bounds checks and format specifiers
Date: Fri, 17 Apr 2026 07:56:40 +0300	[thread overview]
Message-ID: <20260417045640.2217188-1-lyulin.2003@mail.ru> (raw)

Some strings, primarily memory zone names, could exceed their
32-byte buffers (RTE_MEMZONE_NAMESIZE) because the PCI_PRI_FMT
format string is not strictly bounded in length. Add bounds
checking by replacing sprintf with snprintf, and use format
specifiers that match the width of the integer arguments.
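
For illustration only (this sketch is not part of the patch): snprintf
writes at most the given size, including the terminating NUL, so a
memzone name built from a worst-case PCI address is truncated rather
than overrunning its 32-byte buffer. The format string and address
values below are made up to mimic PCI_PRI_FMT, and MEMZONE_NAMESIZE
only mirrors DPDK's RTE_MEMZONE_NAMESIZE:

	#include <stdio.h>

	#define MEMZONE_NAMESIZE 32	/* mirrors RTE_MEMZONE_NAMESIZE */

	int main(void)
	{
		char name[MEMZONE_NAMESIZE];
		unsigned char fw_major = 230, fw_minor = 1;
		char ver[16];

		/*
		 * Unlike sprintf, snprintf never writes past the given
		 * size; an over-long name is silently truncated instead
		 * of overflowing the buffer.
		 */
		int n = snprintf(name, sizeof(name),
				 "bnxt_rx_fc_out_%x:%x:%x.%x",
				 0xffffffffu, 0xffu, 0x1fu, 0x7u);

		/* A return value >= sizeof(name) reports truncation. */
		printf("%s%s\n", name,
		       n >= (int)sizeof(name) ? " (truncated)" : "");

		/* %hhu matches the promoted unsigned char arguments. */
		snprintf(ver, sizeof(ver), "%hhu.%hhu", fw_major, fw_minor);
		printf("%s\n", ver);
		return 0;
	}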

Fixes: 02a95625fe9c ("net/bnxt: add flow stats in extended stats")
Cc: somnath.kotur@broadcom.com
Cc: stable@dpdk.org

Signed-off-by: Denis Lyulin <lyulin.2003@mail.ru>
---
 drivers/net/bnxt/bnxt_ethdev.c | 34 +++++++++++++++++++---------------
 drivers/net/bnxt/bnxt_hwrm.c   | 16 +++++++++-------
 2 files changed, 28 insertions(+), 22 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index b677f9491d..bb02a2579b 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -652,8 +652,9 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 
 	max_fc = bp->flow_stat->max_fc;
 
-	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
-		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+	snprintf(type, RTE_MEMZONE_NAMESIZE, "bnxt_rx_fc_in_" PCI_PRI_FMT,
+		pdev->addr.domain, pdev->addr.bus,
+		pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
 	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 4,
@@ -661,8 +662,9 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
-		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+	snprintf(type, RTE_MEMZONE_NAMESIZE, "bnxt_rx_fc_out_" PCI_PRI_FMT,
+		pdev->addr.domain, pdev->addr.bus,
+		pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
 	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 16,
@@ -670,8 +672,9 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
-		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+	snprintf(type, RTE_MEMZONE_NAMESIZE, "bnxt_tx_fc_in_" PCI_PRI_FMT,
+		pdev->addr.domain, pdev->addr.bus,
+		pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
 	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 4,
@@ -679,8 +682,9 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
-		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+	snprintf(type, RTE_MEMZONE_NAMESIZE, "bnxt_tx_fc_out_" PCI_PRI_FMT,
+		pdev->addr.domain, pdev->addr.bus,
+		pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
 	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 16,
@@ -3252,7 +3256,7 @@ bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 	uint8_t fw_rsvd = bp->fw_ver & 0xff;
 	int ret;
 
-	ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
+	ret = snprintf(fw_version, fw_size, "%hhu.%hhu.%hhu.%hhu",
 			fw_major, fw_minor, fw_updt, fw_rsvd);
 	if (ret < 0)
 		return -EINVAL;
@@ -5199,13 +5203,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 		RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / BNXT_PAGE_SIZE;
 	rmem->page_size = BNXT_PAGE_SIZE;
 
-	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d",
+	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%hx_%hu",
 		 suffix, idx, bp->eth_dev->data->port_id);
 	ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0);
 	if (ctx_pg->ctx_pg_arr == NULL)
 		return -ENOMEM;
 
-	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d",
+	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%hx_%hu",
 		 suffix, idx, bp->eth_dev->data->port_id);
 	ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0);
 	if (ctx_pg->ctx_dma_arr == NULL)
@@ -5219,7 +5223,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 
 	if (rmem->nr_pages > 1) {
 		snprintf(name, RTE_MEMZONE_NAMESIZE,
-			 "bnxt_ctxpgtbl%s_%x_%d",
+			 "bnxt_ctxpgtbl%s_%hx_%hu",
 			 suffix, idx, bp->eth_dev->data->port_id);
 		name[RTE_MEMZONE_NAMESIZE - 1] = 0;
 		mz = rte_memzone_lookup(name);
@@ -5244,7 +5248,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 		rmem->pg_tbl_mz = mz;
 	}
 
-	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
+	snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%hx_%hu",
 		 suffix, idx, bp->eth_dev->data->port_id);
 	mz = rte_memzone_lookup(name);
 	if (!mz) {
@@ -5393,7 +5397,7 @@ int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp)
 		for (i = 0; i < w && rc == 0; i++) {
 			char name[RTE_MEMZONE_NAMESIZE] = {0};
 
-			sprintf(name, "_%d_%d", i, type);
+			snprintf(name, RTE_MEMZONE_NAMESIZE, "_%d_%hu", i, type);
 
 			if (ctxm->entry_multiple)
 				entries = bnxt_roundup(ctxm->max_entries,
@@ -6977,7 +6981,7 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
 		}
 
 		/* representor port net_bdf_port */
-		snprintf(name, sizeof(name), "net_%s_representor_%d",
+		snprintf(name, sizeof(name), "net_%s_representor_%hu",
 			 pci_dev->device.name, eth_da->representor_ports[i]);
 
 		if (rte_eth_dev_allocated(name) != NULL) {
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 0c82935de9..5c51108b65 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1693,7 +1693,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
 	     (dev_caps_cfg &
 	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
 	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
-		sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
+		snprintf(type, RTE_MEMZONE_NAMESIZE,
+			"bnxt_hwrm_short_" PCI_PRI_FMT,
 			bp->pdev->addr.domain, bp->pdev->addr.bus,
 			bp->pdev->addr.devid, bp->pdev->addr.function);
 
@@ -3526,8 +3527,9 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 	struct rte_pci_device *pdev = bp->pdev;
 	char type[RTE_MEMZONE_NAMESIZE];
 
-	sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
-		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+	snprintf(type, RTE_MEMZONE_NAMESIZE, "bnxt_hwrm_" PCI_PRI_FMT,
+		pdev->addr.domain, pdev->addr.bus,
+		pdev->addr.devid, pdev->addr.function);
 	bp->max_resp_len = BNXT_PAGE_SIZE;
 	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
 	if (bp->hwrm_cmd_resp_addr == NULL)
@@ -6573,7 +6575,7 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp)
 		if (ctxm->instance_bmap)
 			n = hweight32(ctxm->instance_bmap);
 
-		sprintf(name, "bnxt_ctx_pgmem_%d_%d",
+		snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pgmem_%hu_%hu",
 			bp->eth_dev->data->port_id, type);
 		ctxm->pg_info = rte_malloc(name, sizeof(*ctxm->pg_info) * n,
 					   RTE_CACHE_LINE_SIZE);
@@ -7535,7 +7537,7 @@ int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, struct bnxt_representor *rep_bp)
 	}
 
 	HWRM_PREP(&req, HWRM_CFA_PAIR_INFO, BNXT_USE_CHIMP_MB);
-	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%hu",
 		 bp->eth_dev->data->name, rep_bp->vf_id);
 	req.flags =
 		rte_cpu_to_le_32(HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE);
@@ -7564,7 +7566,7 @@ int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
 
 	HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
-	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%hu",
 		 bp->eth_dev->data->name, rep_bp->vf_id);
 
 	req.pf_b_id = rep_bp->parent_pf_idx;
@@ -7609,7 +7611,7 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
 	}
 
 	HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
-	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%hu",
 		 bp->eth_dev->data->name, rep_bp->vf_id);
 	req.pf_b_id = rep_bp->parent_pf_idx;
 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
-- 
2.34.1



Thread overview: 5+ messages
2026-04-17  4:56 Denis Lyulin [this message]
2026-04-17 16:43 ` [PATCH 1/1] net/bnxt: fix string bounds checks and format specifiers Kishore Padmanabha
2026-04-17 17:03   ` Stephen Hemminger
2026-04-22  4:44     ` Denis Lyulin
2026-04-22 13:35       ` Kishore Padmanabha
