From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [PATCH v2 4/5] bus/dpaa: Set max push RXQ number
Date: Wed, 8 Oct 2025 10:05:18 +0530 [thread overview]
Message-ID: <20251008043519.2461707-5-g.singh@nxp.com> (raw)
In-Reply-To: <20251008043519.2461707-1-g.singh@nxp.com>
From: Jun Yang <jun.yang@nxp.com>
The max push queue number is a bus-level setting affecting all DPAA devices.
Move the configuration from the PMD driver to the bus driver.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/bus/dpaa/bus_dpaa_driver.h | 6 ++++
drivers/bus/dpaa/dpaa_bus.c | 51 ++++++++++++++++++++++++++++++
drivers/net/dpaa/dpaa_ethdev.c | 51 ++++++++----------------------
3 files changed, 70 insertions(+), 38 deletions(-)
diff --git a/drivers/bus/dpaa/bus_dpaa_driver.h b/drivers/bus/dpaa/bus_dpaa_driver.h
index 976f356699..cca0543432 100644
--- a/drivers/bus/dpaa/bus_dpaa_driver.h
+++ b/drivers/bus/dpaa/bus_dpaa_driver.h
@@ -249,6 +249,12 @@ struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);
__rte_internal
uint32_t dpaa_soc_ver(void);
+__rte_internal
+int dpaa_push_queue_num_update(void);
+
+__rte_internal
+uint16_t dpaa_push_queue_max_num(void);
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 1a35aa52df..d9830b68ca 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -50,6 +50,13 @@
#define DPAA_SVR_MASK 0xffff0000
#define RTE_PRIORITY_102 102
+#define DPAA_PUSH_RXQ_NUM_ARG "dpaa_push_rxq_num"
+/* At present we allow up to 4 push mode queues by default - as each of
+ * these queues needs a dedicated portal and we are short of portals.
+ */
+#define DPAA_MAX_PUSH_MODE_QUEUE 8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
+
struct rte_dpaa_bus {
struct rte_bus bus;
TAILQ_HEAD(, rte_dpaa_device) device_list;
@@ -57,6 +64,8 @@ struct rte_dpaa_bus {
int device_count;
int detected;
uint32_t svr_ver;
+ uint16_t max_push_rxq_num;
+ RTE_ATOMIC(uint16_t) push_rxq_num;
};
static struct rte_dpaa_bus rte_dpaa_bus;
@@ -91,6 +100,34 @@ dpaa_get_eth_port_cfg(int dev_id)
return &dpaa_netcfg->port_cfg[dev_id];
}
+RTE_EXPORT_INTERNAL_SYMBOL(dpaa_push_queue_num_update)
+int
+dpaa_push_queue_num_update(void)
+{
+ int ret = false;
+ uint16_t current, new_val;
+
+ current = rte_atomic_load_explicit(&rte_dpaa_bus.push_rxq_num,
+ rte_memory_order_acquire);
+ if (current < rte_dpaa_bus.max_push_rxq_num) {
+ new_val = current + 1;
+ if (rte_atomic_compare_exchange_strong_explicit(&rte_dpaa_bus.push_rxq_num,
+ &current, new_val,
+ rte_memory_order_release,
+ rte_memory_order_acquire))
+ ret = true;
+ }
+
+ return ret;
+}
+
+RTE_EXPORT_INTERNAL_SYMBOL(dpaa_push_queue_max_num)
+uint16_t
+dpaa_push_queue_max_num(void)
+{
+ return rte_dpaa_bus.max_push_rxq_num;
+}
+
static int
compare_dpaa_devices(struct rte_dpaa_device *dev1,
struct rte_dpaa_device *dev2)
@@ -681,6 +718,7 @@ rte_dpaa_bus_probe(void)
uint32_t svr_ver;
int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
static int process_once;
+ char *penv;
/* If DPAA bus is not present nothing needs to be done */
if (!rte_dpaa_bus.detected)
@@ -709,6 +747,18 @@ rte_dpaa_bus_probe(void)
rte_dpaa_bus.svr_ver);
}
+ /* Disabling the default push mode for LS1043A */
+ if (rte_dpaa_bus.svr_ver == SVR_LS1043A_FAMILY) {
+ rte_dpaa_bus.max_push_rxq_num = 0;
+ return 0;
+ }
+
+ penv = getenv("DPAA_PUSH_QUEUES_NUMBER");
+ if (penv)
+ rte_dpaa_bus.max_push_rxq_num = atoi(penv);
+ if (rte_dpaa_bus.max_push_rxq_num > DPAA_MAX_PUSH_MODE_QUEUE)
+ rte_dpaa_bus.max_push_rxq_num = DPAA_MAX_PUSH_MODE_QUEUE;
+
/* Device list creation is only done once */
if (!process_once) {
rte_dpaa_bus_dev_build();
@@ -947,6 +997,7 @@ static struct rte_dpaa_bus rte_dpaa_bus = {
.dev_iterate = dpaa_bus_dev_iterate,
.cleanup = dpaa_bus_cleanup,
},
+ .max_push_rxq_num = DPAA_DEFAULT_PUSH_MODE_QUEUE,
.device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
.device_count = 0,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 43aab98339..0baf5c03fa 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -89,16 +89,6 @@ static int fmc_q = 1; /* Indicates the use of static fmc for distribution */
static int default_q; /* use default queue - FMC is not executed*/
bool dpaa_enable_recv_err_pkts; /* Enable main queue to receive error packets */
-/* At present we only allow up to 4 push mode queues as default - as each of
- * this queue need dedicated portal and we are short of portals.
- */
-#define DPAA_MAX_PUSH_MODE_QUEUE 8
-#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
-
-static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
-static int dpaa_push_queue_idx; /* Queue index which are in push mode*/
-
-
/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
@@ -1113,7 +1103,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
struct qm_mcc_initfq opts = {0};
u32 ch_id, flags = 0;
- int ret;
+ int ret, set_push_rxq = false;
u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
uint32_t max_rx_pktlen;
@@ -1214,12 +1204,12 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
fman_if_get_sg_enable(fif), max_rx_pktlen);
/* checking if push mode only, no error check for now */
- if (!rxq->is_static &&
- dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ if (!rxq->is_static)
+ set_push_rxq = dpaa_push_queue_num_update();
+ if (set_push_rxq) {
struct qman_portal *qp;
int q_fd;
- dpaa_push_queue_idx++;
opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
QM_FQCTRL_CTXASTASHING |
@@ -1269,7 +1259,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
qp = fsl_qman_fq_portal_create(&q_fd);
if (!qp) {
DPAA_PMD_ERR("Unable to alloc fq portal");
- return -1;
+ return -EIO;
}
rxq->qp = qp;
@@ -1279,19 +1269,19 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
struct rte_device *rdev = dev->device;
dpaa_dev = container_of(rdev, struct rte_dpaa_device,
- device);
+ device);
dev->intr_handle = dpaa_dev->intr_handle;
if (rte_intr_vec_list_alloc(dev->intr_handle,
- NULL, dpaa_push_mode_max_queue)) {
+ NULL, dpaa_push_queue_max_num())) {
DPAA_PMD_ERR("intr_vec alloc failed");
return -ENOMEM;
}
if (rte_intr_nb_efd_set(dev->intr_handle,
- dpaa_push_mode_max_queue))
+ dpaa_push_queue_max_num()))
return -rte_errno;
if (rte_intr_max_intr_set(dev->intr_handle,
- dpaa_push_mode_max_queue))
+ dpaa_push_queue_max_num()))
return -rte_errno;
}
@@ -1339,9 +1329,8 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
RTE_EXPORT_INTERNAL_SYMBOL(dpaa_eth_eventq_attach)
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
- int eth_rx_queue_id,
- u16 ch_id,
- const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+ int eth_rx_queue_id, u16 ch_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
int ret;
u32 flags = 0;
@@ -1349,10 +1338,10 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
struct qm_mcc_initfq opts = {0};
- if (dpaa_push_mode_max_queue) {
+ if (dpaa_push_queue_max_num() > 0) {
DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible");
DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.",
- dpaa_push_mode_max_queue);
+ dpaa_push_queue_max_num());
DPAA_PMD_WARN("To disable set DPAA_PUSH_QUEUES_NUMBER to 0");
}
@@ -2581,20 +2570,6 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
}
}
- /* disabling the default push mode for LS1043 */
- if (dpaa_soc_ver() == SVR_LS1043A_FAMILY)
- dpaa_push_mode_max_queue = 0;
-
- /* if push mode queues to be enabled. Currently we are allowing
- * only one queue per thread.
- */
- if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
- dpaa_push_mode_max_queue =
- atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
- if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
- dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
- }
-
is_global_init = 1;
}
--
2.25.1
next prev parent reply other threads:[~2025-10-08 4:36 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-07 5:00 [PATCH 0/5] DPAA specific changes Gagandeep Singh
2025-10-07 5:00 ` [PATCH 1/5] bus/dpaa: add FQ shutdown and improve logging Gagandeep Singh
2025-10-07 5:00 ` [PATCH 2/5] net/dpaa: Support IEEE1588 by timesync API Gagandeep Singh
2025-10-07 5:00 ` [PATCH 3/5] bus/dpaa: Disable qman Invalid Enqueue State interrupt Gagandeep Singh
2025-10-07 5:00 ` [PATCH 4/5] bus/dpaa: Set max push RXQ number Gagandeep Singh
2025-10-07 5:00 ` [PATCH 5/5] net/dpaa: Fix coverity issue Gagandeep Singh
2025-10-08 4:35 ` [PATCH v2 0/5] DPAA specific changes Gagandeep Singh
2025-10-08 4:35 ` [PATCH v2 1/5] bus/dpaa: add FQ shutdown and improve logging Gagandeep Singh
2025-10-08 4:35 ` [PATCH v2 2/5] net/dpaa: Support IEEE1588 by timesync API Gagandeep Singh
2025-10-08 4:35 ` [PATCH v2 3/5] bus/dpaa: Disable qman Invalid Enqueue State interrupt Gagandeep Singh
2025-10-08 4:35 ` Gagandeep Singh [this message]
2025-10-08 4:35 ` [PATCH v2 5/5] net/dpaa: Fix coverity issue Gagandeep Singh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251008043519.2461707-5-g.singh@nxp.com \
--to=g.singh@nxp.com \
--cc=dev@dpdk.org \
--cc=hemant.agrawal@nxp.com \
--cc=jun.yang@nxp.com \
--cc=sachin.saxena@nxp.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).