* [net-next PatchV2] octeontx2-af: map management port always to first PF
@ 2024-04-10 13:25 Hariprasad Kelam
  2024-04-10 22:30 ` Jacob Keller
  2024-04-12  2:55 ` Jakub Kicinski
  0 siblings, 2 replies; 5+ messages in thread
From: Hariprasad Kelam @ 2024-04-10 13:25 UTC (permalink / raw)
  To: netdev, linux-kernel
  Cc: kuba, davem, sgoutham, gakula, jerinj, lcherian, sbhatta, hkelam,
	naveenm, edumazet, pabeni

The user can enable or disable an entire MAC block or only a subset of
the ports within a block. As a result, the management port's interface
name varies with the user's configuration whenever the port is not
mapped to the first PF.

Change the CGX LMAC to PF mapping so that the management port is always
mapped to the first PF, keeping its interface name stable regardless of
which other ports are enabled.

Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
---
v2 * Refactor code to avoid duplication.
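
For reviewers, a minimal user-space sketch of the two-pass mapping
order implemented below; the struct and helper names here are
illustrative stand-ins, not the driver's API:

/*
 * Pass 1 maps the management port to the first PF, pass 2 maps every
 * other port in CGX/LMAC order, mirroring rvu_cgx_map_mgmt_port().
 */
#include <stdbool.h>
#include <stdio.h>

struct port {
	int cgx;
	int lmac;
	bool mgmt;	/* mirrors cgx_lmac_fwdata_s::mgmt_port */
};

static void map_ports(struct port *ports, int nports, bool map_mgmt,
		      int *next_pf)
{
	for (int i = 0; i < nports; i++) {
		if (ports[i].mgmt != map_mgmt)
			continue;
		printf("PF%d -> cgx%d lmac%d%s\n", (*next_pf)++,
		       ports[i].cgx, ports[i].lmac,
		       ports[i].mgmt ? " (mgmt)" : "");
		if (map_mgmt)
			return;	/* only one management port expected */
	}
}

int main(void)
{
	struct port ports[] = {
		{ 0, 0, false }, { 0, 1, false },
		{ 1, 0, true  }, { 1, 1, false },
	};
	int pf = 1;	/* illustrative base, akin to PF_CGXMAP_BASE */

	map_ports(ports, 4, true, &pf);		/* mgmt port -> first PF */
	map_ports(ports, 4, false, &pf);	/* remaining ports */
	return 0;
}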

 .../net/ethernet/marvell/octeontx2/af/mbox.h  |  5 +-
 .../ethernet/marvell/octeontx2/af/rvu_cgx.c   | 84 +++++++++++++------
 2 files changed, 63 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4a77f6fe2622..88cced83bf23 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -639,7 +639,10 @@ struct cgx_lmac_fwdata_s {
 	/* Only applicable if SFP/QSFP slot is present */
 	struct sfp_eeprom_s sfp_eeprom;
 	struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+	u32 lmac_type;
+	u32 portm_idx;
+	u64 mgmt_port:1;
+#define LMAC_FWDATA_RESERVED_MEM 1019
 	u64 reserved[LMAC_FWDATA_RESERVED_MEM];
 };

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 266ecbc1b97a..8cc17d7e368d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -118,15 +118,67 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
 		pfvf->nix_blkaddr = BLKADDR_NIX1;
 }

-static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+	struct cgx_lmac_fwdata_s *fwdata;
+
+	fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+	return !!fwdata->mgmt_port;
+}
+
+static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, unsigned int pf,
+				  int cgx, int lmac)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
-	int cgx_cnt_max = rvu->cgx_cnt_max;
-	int pf = PF_CGXMAP_BASE;
+	int numvfs, hwvfs;
+	int free_pkind;
+
+	rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+	rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+	free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+	pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+	rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
+	rvu->cgx_mapped_pfs++;
+	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+	rvu->cgx_mapped_vfs += numvfs;
+}
+
+static void rvu_cgx_map_mgmt_port(struct rvu *rvu, int cgx_cnt_max,
+				  unsigned int *pf, bool req_map_mgmt)
+{
 	unsigned long lmac_bmap;
-	int size, free_pkind;
 	int cgx, lmac, iter;
-	int numvfs, hwvfs;
+
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
+		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+			if (iter >= MAX_LMAC_COUNT)
+				continue;
+			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
+			/* Map management port always to first PF */
+			if (req_map_mgmt &&
+			    rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
+				__rvu_map_cgx_lmac_pf(rvu, *pf, cgx, lmac);
+				(*pf)++;
+				return;
+			}
+			/* Non management port mapping */
+			if (!req_map_mgmt &&
+			    !rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
+				__rvu_map_cgx_lmac_pf(rvu, *pf, cgx, lmac);
+				(*pf)++;
+			}
+		}
+	}
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+	int cgx_cnt_max = rvu->cgx_cnt_max;
+	unsigned int pf = PF_CGXMAP_BASE;
+	int size;

 	if (!cgx_cnt_max)
 		return 0;
@@ -155,26 +207,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 		return -ENOMEM;

 	rvu->cgx_mapped_pfs = 0;
-	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
-		if (!rvu_cgx_pdata(cgx, rvu))
-			continue;
-		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
-			if (iter >= MAX_LMAC_COUNT)
-				continue;
-			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
-					      iter);
-			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
-			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
-			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
-			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
-			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
-			rvu->cgx_mapped_pfs++;
-			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
-			rvu->cgx_mapped_vfs += numvfs;
-			pf++;
-		}
-	}
+	rvu_cgx_map_mgmt_port(rvu, cgx_cnt_max, &pf, true);
+	rvu_cgx_map_mgmt_port(rvu, cgx_cnt_max, &pf, false);
 	return 0;
 }

--
2.17.1
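
A note on the mbox.h hunk above: the two new u32 fields plus the u64
bitfield word occupy two 64-bit slots, so LMAC_FWDATA_RESERVED_MEM
shrinks from 1021 to 1019 and the overall size of struct
cgx_lmac_fwdata_s is unchanged. A self-contained sketch of that size
invariant (the surrounding layout here is a stand-in, not the real
firmware structure):

#include <stdint.h>

/* Stand-in for the firmware-shared layout; only the tail matters. */
struct fwdata_old {
	uint64_t reserved[1021];
};

struct fwdata_new {
	uint32_t lmac_type;
	uint32_t portm_idx;
	uint64_t mgmt_port:1;
	uint64_t reserved[1019];
};

/* 2 x u32 + one u64 bitfield word consume two u64 slots in total. */
_Static_assert(sizeof(struct fwdata_old) == sizeof(struct fwdata_new),
	       "cgx_lmac_fwdata_s size must not change");

int main(void)
{
	return 0;
}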


Thread overview: 5+ messages
2024-04-10 13:25 [net-next PatchV2] octeontx2-af: map management port always to first PF Hariprasad Kelam
2024-04-10 22:30 ` Jacob Keller
2024-04-12  2:55 ` Jakub Kicinski
2024-04-12 12:07   ` Sunil Kovvuri Goutham
2024-04-13  1:44     ` Jakub Kicinski
