From: Ratheesh Kannoth <rkannoth@marvell.com>
To: <linux-kernel@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
<netdev@vger.kernel.org>, <oss-drivers@corigine.com>
Cc: <akiyano@amazon.com>, <andrew+netdev@lunn.ch>,
<anthony.l.nguyen@intel.com>, <arkadiusz.kubalewski@intel.com>,
<brett.creeley@amd.com>, <darinzon@amazon.com>,
<davem@davemloft.net>, <donald.hunter@gmail.com>,
<edumazet@google.com>, <horms@kernel.org>, <idosch@nvidia.com>,
<ivecera@redhat.com>, <jiri@resnulli.us>, <kuba@kernel.org>,
<leon@kernel.org>, <mbloch@nvidia.com>,
<michael.chan@broadcom.com>, <pabeni@redhat.com>,
<pavan.chebbi@broadcom.com>, <petrm@nvidia.com>,
<Prathosh.Satish@microchip.com>, <przemyslaw.kitszel@intel.com>,
<saeedm@nvidia.com>, <sgoutham@marvell.com>, <tariqt@nvidia.com>,
<vadim.fedorenko@linux.dev>,
Ratheesh Kannoth <rkannoth@marvell.com>
Subject: [PATCH v13 net-next 5/9] octeontx2-af: npc: cn20k: add subbank search order control
Date: Mon, 11 May 2026 09:09:19 +0530 [thread overview]
Message-ID: <20260511033923.1301976-6-rkannoth@marvell.com> (raw)
In-Reply-To: <20260511033923.1301976-1-rkannoth@marvell.com>
CN20K NPC MCAM is split into 32 subbanks that are searched in a
predefined order during allocation. Lower-numbered subbanks have
higher priority than higher-numbered ones.
Add a runtime devlink parameter "npc_srch_order" to control the order
in which subbanks are searched during MCAM allocation.
Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
---
.../ethernet/marvell/octeontx2/af/cn20k/npc.c | 58 +++++++++++-
.../ethernet/marvell/octeontx2/af/cn20k/npc.h | 3 +
.../marvell/octeontx2/af/rvu_devlink.c | 92 +++++++++++++++++--
3 files changed, 141 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
index e9aad0ad3fa6..8f03a1f558cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
@@ -3376,7 +3376,7 @@ rvu_mbox_handler_npc_cn20k_get_kex_cfg(struct rvu *rvu,
return 0;
}
-static int *subbank_srch_order;
+static u32 *subbank_srch_order;
static void npc_populate_restricted_idxs(int num_subbanks)
{
@@ -3388,7 +3388,7 @@ static int npc_create_srch_order(int cnt)
{
int val = 0;
- subbank_srch_order = kcalloc(cnt, sizeof(int),
+ subbank_srch_order = kcalloc(cnt, sizeof(u32),
GFP_KERNEL);
if (!subbank_srch_order)
return -ENOMEM;
@@ -3906,6 +3906,60 @@ static void npc_unlock_all_subbank(void)
mutex_unlock(&npc_priv.sb[i].lock);
}
+int npc_cn20k_search_order_set(struct rvu *rvu,
+			       u64 narr[MAX_NUM_SUB_BANKS], int cnt)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	struct npc_subbank *sb;
+	struct xarray *xa;
+	int sb_idx, rc = 0;	/* init: loops below may not run if cnt == 0 */
+
+	if (cnt != npc_priv.num_subbanks) {
+		dev_err(rvu->dev, "Number of entries(%d) != %u\n",
+			cnt, npc_priv.num_subbanks);
+		return -EINVAL;
+	}
+
+	mutex_lock(&mcam->lock);
+	npc_lock_all_subbank();
+	restrict_valid = false;
+
+	for (sb_idx = 0; sb_idx < cnt; sb_idx++) {
+		sb = &npc_priv.sb[sb_idx];
+
+		xa = &npc_priv.xa_sb_free;
+		if (sb->flags & NPC_SUBBANK_FLAG_USED)
+			xa = &npc_priv.xa_sb_used;
+
+		sb->arr_idx = narr[sb_idx];
+
+		rc = xa_err(xa_store(xa, sb->arr_idx,
+				     xa_mk_value(sb_idx), GFP_KERNEL));
+		if (rc) {
+			dev_err(rvu->dev,
+				"Setting arr_idx=%d for sb=%d failed\n",
+				sb->arr_idx, sb_idx);
+			goto out; /* NOTE(review): prior iters not rolled back */
+		}
+	}
+
+	for (int i = 0; i < cnt; i++)
+		subbank_srch_order[i] = (u32)narr[i];
+
+out:	/* common exit, reached on success too (rc == 0 then) */
+	npc_unlock_all_subbank();
+	mutex_unlock(&mcam->lock);
+
+	return rc;
+}
+
+const u32 *npc_cn20k_search_order_get(bool *restricted_order, u32 *sz)
+{ /* Report current subbank search order array, its size and restricted flag */
+ *restricted_order = restrict_valid; /* whether restricted-idx mode is active */
+ *sz = npc_priv.num_subbanks; /* number of entries in returned array */
+ return subbank_srch_order; /* module-static array; caller must not free */
+}
+
/* Only non-ref non-contigous mcam indexes
* are picked for defrag process
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h
index 9567a2d80b58..bf030e40fbf9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h
@@ -343,5 +343,8 @@ bool npc_is_cgx_or_lbk(struct rvu *rvu, u16 pcifunc);
int npc_mcam_idx_2_subbank_idx(struct rvu *rvu, u16 mcam_idx,
struct npc_subbank **sb,
int *sb_off);
+const u32 *npc_cn20k_search_order_get(bool *restricted_order, u32 *sz);
+int npc_cn20k_search_order_set(struct rvu *rvu, u64 narr[MAX_NUM_SUB_BANKS],
+ int cnt);
#endif /* NPC_CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index a42404e6db7c..aa3ecab5ebd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1258,6 +1258,7 @@ enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_DEFRAG,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_SRCH_ORDER,
RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
@@ -1619,12 +1620,83 @@ static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
return 0;
}
+static int rvu_af_dl_npc_srch_order_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{ /* devlink "npc_srch_order" runtime setter: forward u64 array to NPC core */
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ return npc_cn20k_search_order_set(rvu,
+ ctx->val.u64arr.val,
+ ctx->val.u64arr.size);
+}
+
+static int rvu_af_dl_npc_srch_order_get(struct devlink *devlink, u32 id,
+					struct devlink_param_gset_ctx *ctx,
+					struct netlink_ext_ack *extack)
+{
+	bool restricted_order;
+	const u32 *order;
+	u32 sz;
+
+	/* Copy current subbank search order into the devlink u64 array */
+	order = npc_cn20k_search_order_get(&restricted_order, &sz);
+	ctx->val.u64arr.size = sz;
+	for (u32 i = 0; i < sz; i++)	/* u32 idx: avoid signed/unsigned cmp */
+		ctx->val.u64arr.val[i] = order[i];
+
+	return 0;
+}
+
+static int rvu_af_dl_npc_srch_order_validate(struct devlink *devlink, u32 id,
+					     union devlink_param_value *val,
+					     struct netlink_ext_ack *extack)
+{
+	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+	struct rvu *rvu = rvu_dl->rvu;
+	bool restricted_order;
+	unsigned long w = 0;	/* bitmap of subbank indexes seen so far */
+	u64 *arr;
+	u32 sz;
+
+	/* A valid order is a permutation of 0..num_subbanks-1 */
+	npc_cn20k_search_order_get(&restricted_order, &sz);
+	if (sz != val->u64arr.size) {
+		dev_err(rvu->dev,
+			"Wrong size %llu, should be %u\n",
+			val->u64arr.size, sz);
+		return -EINVAL;
+	}
+
+	arr = val->u64arr.val;
+	for (u32 i = 0; i < sz; i++) {	/* u32 idx: avoid signed/unsigned cmp */
+		if (arr[i] >= sz)	/* out-of-range subbank index */
+			return -EINVAL;
+
+		/* Report the actual duplicate value, not a missing index */
+		if (w & BIT_ULL(arr[i])) {
+			dev_err(rvu->dev,
+				"Duplicate subbank index %llu\n", arr[i]);
+			return -EINVAL;
+		}
+		w |= BIT_ULL(arr[i]);
+	}
+
+	return 0;
+}
+
static const struct devlink_ops rvu_devlink_ops = {
.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
-static const struct devlink_param rvu_af_dl_param_defrag[] = {
+static const struct devlink_param rvu_af_dl_cn20k_params[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_SRCH_ORDER,
+ "npc_srch_order", DEVLINK_PARAM_TYPE_U64_ARRAY,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_npc_srch_order_get,
+ rvu_af_dl_npc_srch_order_set,
+ rvu_af_dl_npc_srch_order_validate),
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEFRAG,
"npc_defrag", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1666,13 +1738,13 @@ int rvu_register_dl(struct rvu *rvu)
}
if (is_cn20k(rvu->pdev)) {
- err = devlink_params_register(dl, rvu_af_dl_param_defrag,
- ARRAY_SIZE(rvu_af_dl_param_defrag));
+ err = devlink_params_register(dl, rvu_af_dl_cn20k_params,
+ ARRAY_SIZE(rvu_af_dl_cn20k_params));
if (err) {
dev_err(rvu->dev,
- "devlink defrag params register failed with error %d",
+ "devlink cn20k params register failed with error %d",
err);
- goto err_dl_defrag;
+ goto err_dl_cn20k_params;
}
}
@@ -1695,10 +1767,10 @@ int rvu_register_dl(struct rvu *rvu)
err_dl_exact_match:
if (is_cn20k(rvu->pdev))
- devlink_params_unregister(dl, rvu_af_dl_param_defrag,
- ARRAY_SIZE(rvu_af_dl_param_defrag));
+ devlink_params_unregister(dl, rvu_af_dl_cn20k_params,
+ ARRAY_SIZE(rvu_af_dl_cn20k_params));
-err_dl_defrag:
+err_dl_cn20k_params:
devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
err_dl_health:
@@ -1717,8 +1789,8 @@ void rvu_unregister_dl(struct rvu *rvu)
devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
if (is_cn20k(rvu->pdev))
- devlink_params_unregister(dl, rvu_af_dl_param_defrag,
- ARRAY_SIZE(rvu_af_dl_param_defrag));
+ devlink_params_unregister(dl, rvu_af_dl_cn20k_params,
+ ARRAY_SIZE(rvu_af_dl_cn20k_params));
/* Unregister exact match devlink only for CN10K-B */
if (rvu_npc_exact_has_match_table(rvu))
--
2.43.0
next prev parent reply other threads:[~2026-05-11 3:40 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-11 3:39 [PATCH v13 net-next 0/9] octeontx2-af: npc: Enhancements Ratheesh Kannoth
2026-05-11 3:39 ` [PATCH v13 net-next 1/9] octeontx2-af: npc: cn20k: debugfs enhancements Ratheesh Kannoth
2026-05-11 3:39 ` [PATCH v13 net-next 2/9] net/mlx5e: Reduce stack use reading PCIe congestion thresholds Ratheesh Kannoth
2026-05-11 9:02 ` Dragos Tatulea
2026-05-11 3:39 ` [PATCH v13 net-next 3/9] devlink: pass param values by pointer Ratheesh Kannoth
2026-05-11 8:52 ` Petr Machata
2026-05-12 1:43 ` Kiyanovski, Arthur
2026-05-11 3:39 ` [PATCH v13 net-next 4/9] devlink: Implement devlink param multi attribute nested data values Ratheesh Kannoth
2026-05-12 15:54 ` Ratheesh Kannoth
2026-05-14 3:58 ` Ratheesh Kannoth
2026-05-11 3:39 ` Ratheesh Kannoth [this message]
2026-05-14 4:01 ` [PATCH v13 net-next 5/9] octeontx2-af: npc: cn20k: add subbank search order control Ratheesh Kannoth
2026-05-11 3:39 ` [PATCH v13 net-next 6/9] octeontx2: cn20k: Coordinate default rules with NIX LF lifecycle Ratheesh Kannoth
2026-05-14 4:12 ` Ratheesh Kannoth
2026-05-11 3:39 ` [PATCH v13 net-next 7/9] octeontx2-af: npc: Support for custom KPU profile from filesystem Ratheesh Kannoth
2026-05-14 4:13 ` Ratheesh Kannoth
2026-05-11 3:39 ` [PATCH v13 net-next 8/9] octeontx2: cn20k: Respect NPC MCAM X2/X4 profile in flows and DFT alloc Ratheesh Kannoth
2026-05-14 4:14 ` Ratheesh Kannoth
2026-05-11 3:39 ` [PATCH v13 net-next 9/9] octeontx2-af: npc: cn20k: Allocate npc_priv and dstats dynamically Ratheesh Kannoth
2026-05-14 4:15 ` Ratheesh Kannoth
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260511033923.1301976-6-rkannoth@marvell.com \
--to=rkannoth@marvell.com \
--cc=Prathosh.Satish@microchip.com \
--cc=akiyano@amazon.com \
--cc=andrew+netdev@lunn.ch \
--cc=anthony.l.nguyen@intel.com \
--cc=arkadiusz.kubalewski@intel.com \
--cc=brett.creeley@amd.com \
--cc=darinzon@amazon.com \
--cc=davem@davemloft.net \
--cc=donald.hunter@gmail.com \
--cc=edumazet@google.com \
--cc=horms@kernel.org \
--cc=idosch@nvidia.com \
--cc=ivecera@redhat.com \
--cc=jiri@resnulli.us \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=mbloch@nvidia.com \
--cc=michael.chan@broadcom.com \
--cc=netdev@vger.kernel.org \
--cc=oss-drivers@corigine.com \
--cc=pabeni@redhat.com \
--cc=pavan.chebbi@broadcom.com \
--cc=petrm@nvidia.com \
--cc=przemyslaw.kitszel@intel.com \
--cc=saeedm@nvidia.com \
--cc=sgoutham@marvell.com \
--cc=tariqt@nvidia.com \
--cc=vadim.fedorenko@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.