* [PATCH 1/3] net/mlx5: add missing function documentation
@ 2018-02-15 9:29 Nelio Laranjeiro
2018-02-15 9:29 ` [PATCH 2/3] net/mlx5: convert return errno to negative ones Nelio Laranjeiro
` (3 more replies)
0 siblings, 4 replies; 30+ messages in thread
From: Nelio Laranjeiro @ 2018-02-15 9:29 UTC (permalink / raw)
To: dev; +Cc: Adrien Mazarguil, Yongseok Koh
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
---
drivers/net/mlx5/mlx5_trigger.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f5711a998..a70b13d52 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -14,6 +14,12 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/**
+ * Stop traffic on Tx queues.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
static void
priv_txq_stop(struct priv *priv)
{
@@ -23,6 +29,15 @@ priv_txq_stop(struct priv *priv)
mlx5_priv_txq_release(priv, i);
}
+/**
+ * Start traffic on Tx queues.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, errno on error.
+ */
static int
priv_txq_start(struct priv *priv)
{
@@ -58,6 +73,12 @@ priv_txq_start(struct priv *priv)
return ret;
}
+/**
+ * Stop traffic on Rx queues.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
static void
priv_rxq_stop(struct priv *priv)
{
@@ -67,6 +88,15 @@ priv_rxq_stop(struct priv *priv)
mlx5_priv_rxq_release(priv, i);
}
+/**
+ * Start traffic on Rx queues.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, errno on error.
+ */
static int
priv_rxq_start(struct priv *priv)
{
--
2.11.0
^ permalink raw reply related [flat|nested] 30+ messages in thread* [PATCH 2/3] net/mlx5: convert return errno to negative ones 2018-02-15 9:29 [PATCH 1/3] net/mlx5: add missing function documentation Nelio Laranjeiro @ 2018-02-15 9:29 ` Nelio Laranjeiro 2018-02-16 14:26 ` Adrien Mazarguil 2018-02-15 9:29 ` [PATCH 3/3] net/mlx5: fix traffic restart function to return errors Nelio Laranjeiro ` (2 subsequent siblings) 3 siblings, 1 reply; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-15 9:29 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Yongseok Koh <yskoh@mellanox.com> --- drivers/net/mlx5/mlx5.c | 19 ++++----- drivers/net/mlx5/mlx5_ethdev.c | 30 ++++++++------ drivers/net/mlx5/mlx5_flow.c | 92 +++++++++++++++++++++-------------------- drivers/net/mlx5/mlx5_mac.c | 7 ++-- drivers/net/mlx5/mlx5_mr.c | 4 +- drivers/net/mlx5/mlx5_rss.c | 16 +++---- drivers/net/mlx5/mlx5_rxq.c | 20 ++++----- drivers/net/mlx5/mlx5_socket.c | 41 ++++++++++++------ drivers/net/mlx5/mlx5_trigger.c | 27 ++++++------ drivers/net/mlx5/mlx5_txq.c | 14 +++---- drivers/net/mlx5/mlx5_vlan.c | 2 +- 11 files changed, 145 insertions(+), 127 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index f52edf74f..d24f2a37c 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -413,7 +413,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) * Device arguments structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) @@ -446,7 +446,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) mlx5_args_check, config); if (ret != 0) { rte_kvargs_free(kvlist); - return ret; + return -EINVAL; } } } @@ -472,7 +472,7 @@ static void *uar_base; * Pointer to private structure. 
* * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_uar_init_primary(struct priv *priv) @@ -480,7 +480,6 @@ priv_uar_init_primary(struct priv *priv) void *addr = (void *)0; int i; const struct rte_mem_config *mcfg; - int ret; if (uar_base) { /* UAR address space mapped. */ priv->uar_base = uar_base; @@ -502,8 +501,7 @@ priv_uar_init_primary(struct priv *priv) if (addr == MAP_FAILED) { ERROR("Failed to reserve UAR address space, please adjust " "MLX5_UAR_SIZE or try --base-virtaddr"); - ret = ENOMEM; - return ret; + return -ENOMEM; } /* Accept either same addr or a new addr returned from mmap if target * range occupied. @@ -522,13 +520,12 @@ priv_uar_init_primary(struct priv *priv) * Pointer to private structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_uar_init_secondary(struct priv *priv) { void *addr; - int ret; assert(priv->uar_base); if (uar_base) { /* already reserved. */ @@ -541,15 +538,13 @@ priv_uar_init_secondary(struct priv *priv) if (addr == MAP_FAILED) { ERROR("UAR mmap failed: %p size: %llu", priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + return -ENXIO; } if (priv->uar_base != addr) { ERROR("UAR address %p size %llu occupied, please adjust " "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr", priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + return -ENXIO; } uar_base = addr; /* process local, don't reserve again */ INFO("Reserved UAR address space: %p", addr); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index b73cb53df..12d35dcf2 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -314,7 +314,7 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. 
+ * 0 on success, negative errno value on failure. */ static int dev_configure(struct rte_eth_dev *dev) @@ -338,26 +338,26 @@ dev_configure(struct rte_eth_dev *dev) ERROR("Some Tx offloads are not supported " "requested 0x%" PRIx64 " supported 0x%" PRIx64, tx_offloads, supp_tx_offloads); - return ENOTSUP; + return -ENOTSUP; } if ((rx_offloads & supp_rx_offloads) != rx_offloads) { ERROR("Some Rx offloads are not supported " "requested 0x%" PRIx64 " supported 0x%" PRIx64, rx_offloads, supp_rx_offloads); - return ENOTSUP; + return -ENOTSUP; } if (use_app_rss_key && (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != rss_hash_default_key_len)) { /* MLX5 RSS only support 40bytes key. */ - return EINVAL; + return -EINVAL; } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_hash_default_key_len, 0); if (!priv->rss_conf.rss_key) { ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n); - return ENOMEM; + return -ENOMEM; } memcpy(priv->rss_conf.rss_key, use_app_rss_key ? @@ -375,7 +375,7 @@ dev_configure(struct rte_eth_dev *dev) } if (rxqs_n > priv->config.ind_table_max_size) { ERROR("cannot handle this many RX queues (%u)", rxqs_n); - return EINVAL; + return -EINVAL; } if (rxqs_n == priv->rxqs_n) return 0; @@ -389,7 +389,7 @@ dev_configure(struct rte_eth_dev *dev) priv->config.ind_table_max_size : rxqs_n)); if (priv_rss_reta_index_resize(priv, reta_idx_n)) - return ENOMEM; + return -ENOMEM; /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread * uniformly. */ @@ -420,7 +420,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev) ret = dev_configure(dev); assert(ret >= 0); priv_unlock(priv); - return -ret; + return ret; } /** @@ -1199,7 +1199,7 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) * Nonzero for link up, otherwise link down. * * @return - * 0 on success, errno value on failure. + * 0 on success, -1 on error and errno is set. 
*/ static int priv_dev_set_link(struct priv *priv, int up) @@ -1214,7 +1214,7 @@ priv_dev_set_link(struct priv *priv, int up) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int mlx5_set_link_down(struct rte_eth_dev *dev) @@ -1225,7 +1225,9 @@ mlx5_set_link_down(struct rte_eth_dev *dev) priv_lock(priv); err = priv_dev_set_link(priv, 0); priv_unlock(priv); - return err; + if (err == -1) + return errno; + return 0; } /** @@ -1235,7 +1237,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int mlx5_set_link_up(struct rte_eth_dev *dev) @@ -1246,7 +1248,9 @@ mlx5_set_link_up(struct rte_eth_dev *dev) priv_lock(priv); err = priv_dev_set_link(priv, 1); priv_unlock(priv); - return err; + if (err == -1) + return errno; + return 0; } /** diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 26002c4b9..2a595442e 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -460,7 +460,7 @@ struct ibv_spec_header { * Bit-Mask size in bytes. * * @return - * 0 on success. + * 0 on success, -1 on error. */ static int mlx5_flow_item_validate(const struct rte_flow_item *item, @@ -523,7 +523,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * User RSS configuration to save. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. 
*/ static int priv_flow_convert_rss_conf(struct priv *priv, @@ -538,9 +538,9 @@ priv_flow_convert_rss_conf(struct priv *priv, (void)priv; if (rss_conf) { if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) - return EINVAL; + return -EINVAL; if (rss_conf->rss_key_len != 40) - return EINVAL; + return -EINVAL; if (rss_conf->rss_key_len && rss_conf->rss_key) { parser->rss_conf.rss_key_len = rss_conf->rss_key_len; memcpy(parser->rss_key, rss_conf->rss_key, @@ -1068,7 +1068,7 @@ priv_flow_convert(struct priv *priv, priv_flow_convert_allocate(priv, priority, offset, error); if (!parser->queue[HASH_RXQ_ETH].ibv_attr) - return ENOMEM; + return -ENOMEM; parser->queue[HASH_RXQ_ETH].offset = sizeof(struct ibv_flow_attr); } else { @@ -1103,7 +1103,7 @@ priv_flow_convert(struct priv *priv, cur_item->mask), parser); if (ret) { - rte_flow_error_set(error, ret, + rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM, items, "item not supported"); goto exit_free; @@ -1147,11 +1147,11 @@ priv_flow_convert(struct priv *priv, } rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate verbs spec attributes."); - return ret; + return -rte_errno; exit_count_error: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create counter."); - return rte_errno; + return -rte_errno; } /** @@ -1502,6 +1502,9 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, negative errno value on failure. */ static int mlx5_flow_create_vxlan(const struct rte_flow_item *item, @@ -1542,7 +1545,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, * To avoid such situation, VNI 0 is currently refused. 
*/ if (!vxlan.val.tunnel_id) - return EINVAL; + return -EINVAL; mlx5_flow_create_copy(parser, &vxlan, size); return 0; } @@ -1579,7 +1582,7 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) * Pointer to MLX5 flow parser structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int mlx5_flow_create_count(struct priv *priv __rte_unused, @@ -1597,7 +1600,7 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, init_attr.counter_set_id = 0; parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr); if (!parser->cs) - return EINVAL; + return -EINVAL; counter.counter_set_handle = parser->cs->handle; mlx5_flow_create_copy(parser, &counter, size); #endif @@ -1617,7 +1620,7 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_flow_create_action_queue_drop(struct priv *priv, @@ -1653,7 +1656,7 @@ priv_flow_create_action_queue_drop(struct priv *priv, if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; + err = -ENOMEM; goto error; } return 0; @@ -1689,7 +1692,7 @@ priv_flow_create_action_queue_drop(struct priv *priv, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int priv_flow_create_action_queue_rss(struct priv *priv, @@ -1729,7 +1732,7 @@ priv_flow_create_action_queue_rss(struct priv *priv, rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "cannot create hash rxq"); - return ENOMEM; + return -ENOMEM; } } return 0; @@ -1748,7 +1751,7 @@ priv_flow_create_action_queue_rss(struct priv *priv, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int priv_flow_create_action_queue(struct priv *priv, @@ -1779,7 +1782,7 @@ priv_flow_create_action_queue(struct priv *priv, rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; + err = -ENOMEM; goto error; } DEBUG("%p type %d QP %p ibv_flow %p", @@ -2038,7 +2041,7 @@ priv_flow_flush(struct priv *priv, struct mlx5_flows *list) * Pointer to private structure. * * @return - * 0 on success. + * 0 on success, negative errno on error. */ int priv_flow_create_drop_queue(struct priv *priv) @@ -2117,7 +2120,7 @@ priv_flow_create_drop_queue(struct priv *priv) if (fdq) rte_free(fdq); priv->flow_drop_queue = NULL; - return -1; + return -ENOMEM; } /** @@ -2214,7 +2217,7 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) * Pointer to a TAILQ flow list. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, negative errno value otherwise and rte_errno is set. */ int priv_flow_start(struct priv *priv, struct mlx5_flows *list) @@ -2233,7 +2236,7 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } DEBUG("Flow %p applied", (void *)flow); /* Next flow. 
*/ @@ -2260,7 +2263,7 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } flow_create: flow->frxq[i].ibv_flow = @@ -2270,7 +2273,7 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } DEBUG("Flow %p applied", (void *)flow); } @@ -2319,7 +2322,7 @@ priv_flow_verify(struct priv *priv) * A VLAN flow mask to apply. * * @return - * 0 on success. + * 0 on success, negative errno on error. */ int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, @@ -2372,7 +2375,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, } action_rss; if (!priv->reta_idx_n) - return EINVAL; + return -EINVAL; for (i = 0; i != priv->reta_idx_n; ++i) action_rss.local.queue[i] = (*priv->reta_idx)[i]; action_rss.local.rss_conf = &priv->rss_conf; @@ -2381,7 +2384,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions, &error); if (!flow) - return rte_errno; + return -rte_errno; return 0; } @@ -2396,7 +2399,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, * An Ethernet flow mask to apply. * * @return - * 0 on success. + * 0 on success, negative errno on error. */ int mlx5_ctrl_flow(struct rte_eth_dev *dev, @@ -2455,7 +2458,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, * returned data from the counter. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int priv_flow_query_count(struct ibv_counter_set *cs, @@ -2567,7 +2570,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, * Generic flow parameters structure. * * @return - * 0 on success, errno value on error. + * 0 on success, negative errno value on error. 
*/ static int priv_fdir_filter_convert(struct priv *priv, @@ -2579,7 +2582,7 @@ priv_fdir_filter_convert(struct priv *priv, /* Validate queue number. */ if (fdir_filter->action.rx_queue >= priv->rxqs_n) { ERROR("invalid queue number %d", fdir_filter->action.rx_queue); - return EINVAL; + return -EINVAL; } attributes->attr.ingress = 1; attributes->items[0] = (struct rte_flow_item) { @@ -2601,7 +2604,7 @@ priv_fdir_filter_convert(struct priv *priv, break; default: ERROR("invalid behavior %d", fdir_filter->action.behavior); - return ENOTSUP; + return -ENOTSUP; } attributes->queue.index = fdir_filter->action.rx_queue; switch (fdir_filter->input.flow_type) { @@ -2737,7 +2740,7 @@ priv_fdir_filter_convert(struct priv *priv, default: ERROR("invalid flow type%d", fdir_filter->input.flow_type); - return ENOTSUP; + return -ENOTSUP; } return 0; } @@ -2751,7 +2754,7 @@ priv_fdir_filter_convert(struct priv *priv, * Flow director filter to add. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_fdir_filter_add(struct priv *priv, @@ -2774,11 +2777,11 @@ priv_fdir_filter_add(struct priv *priv, ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); if (ret) - return -ret; + return ret; ret = priv_flow_convert(priv, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) - return -ret; + return ret; flow = priv_flow_create(priv, &priv->flows, &attributes.attr, @@ -2789,7 +2792,7 @@ priv_fdir_filter_add(struct priv *priv, DEBUG("FDIR created %p", (void *)flow); return 0; } - return ENOTSUP; + return -ENOTSUP; } /** @@ -2801,7 +2804,7 @@ priv_fdir_filter_add(struct priv *priv, * Filter to be deleted. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. 
*/ static int priv_fdir_filter_delete(struct priv *priv, @@ -2821,7 +2824,7 @@ priv_fdir_filter_delete(struct priv *priv, ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); if (ret) - return -ret; + return ret; ret = priv_flow_convert(priv, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) @@ -2886,7 +2889,7 @@ priv_fdir_filter_delete(struct priv *priv, if (parser.queue[i].ibv_attr) rte_free(parser.queue[i].ibv_attr); } - return -ret; + return ret; } /** @@ -2898,7 +2901,7 @@ priv_fdir_filter_delete(struct priv *priv, * Filter to be updated. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_fdir_filter_update(struct priv *priv, @@ -2961,7 +2964,7 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) * Pointer to operation-specific structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) @@ -2976,7 +2979,7 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { ERROR("%p: flow director mode %d not supported", (void *)priv, fdir_mode); - return EINVAL; + return -EINVAL; } switch (filter_op) { case RTE_ETH_FILTER_ADD: @@ -2997,7 +3000,7 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) default: DEBUG("%p: unknown operation %u", (void *)priv, filter_op); - ret = EINVAL; + ret = -EINVAL; break; } return ret; @@ -3039,9 +3042,10 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, priv_unlock(priv); break; default: + ret = -ENOTSUP; ERROR("%p: filter type (%d) not supported", (void *)dev, filter_type); break; } - return -ret; + return ret; } diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index e8a8d4594..afd525010 100644 --- a/drivers/net/mlx5/mlx5_mac.c 
+++ b/drivers/net/mlx5/mlx5_mac.c @@ -84,14 +84,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) * VMDq pool index to associate address with (ignored). * * @return - * 0 on success. + * 0 on success, negative errno on error. */ int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, uint32_t index, uint32_t vmdq) { unsigned int i; - int ret = 0; (void)vmdq; assert(index < MLX5_MAX_MAC_ADDRESSES); @@ -103,12 +102,12 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac))) continue; /* Address already configured elsewhere, return with error. */ - return EADDRINUSE; + return -EADDRINUSE; } dev->data->mac_addrs[index] = *mac; if (!dev->data->promiscuous) mlx5_traffic_restart(dev); - return ret; + return 0; } /** diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 857dfcd83..ef267403b 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -333,7 +333,7 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp) * Pointer to memory region to release. * * @return - * 0 on success, errno on failure. + * 0 on success, negative errno on failure. */ int priv_mr_release(struct priv *priv, struct mlx5_mr *mr) @@ -348,7 +348,7 @@ priv_mr_release(struct priv *priv, struct mlx5_mr *mr) rte_free(mr); return 0; } - return EBUSY; + return -EBUSY; } /** diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index d06b0bee1..9975cb049 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -106,7 +106,7 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, * The size of the array to allocate. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. 
*/ int priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) @@ -120,7 +120,7 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) mem = rte_realloc(priv->reta_idx, reta_size * sizeof((*priv->reta_idx)[0]), 0); if (!mem) - return ENOMEM; + return -ENOMEM; priv->reta_idx = mem; priv->reta_idx_n = reta_size; @@ -142,7 +142,7 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) * Number of entries. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_dev_rss_reta_query(struct priv *priv, @@ -153,7 +153,7 @@ priv_dev_rss_reta_query(struct priv *priv, unsigned int i; if (!reta_size || reta_size > priv->reta_idx_n) - return EINVAL; + return -EINVAL; /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -174,7 +174,7 @@ priv_dev_rss_reta_query(struct priv *priv, * Number of entries. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ static int priv_dev_rss_reta_update(struct priv *priv, @@ -187,7 +187,7 @@ priv_dev_rss_reta_update(struct priv *priv, int ret; if (!reta_size) - return EINVAL; + return -EINVAL; ret = priv_rss_reta_index_resize(priv, reta_size); if (ret) return ret; @@ -227,7 +227,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, priv_lock(priv); ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size); priv_unlock(priv); - return -ret; + return ret; } /** @@ -258,5 +258,5 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, mlx5_dev_stop(dev); mlx5_dev_start(dev); } - return -ret; + return ret; } diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 238fa7e56..5be0390e4 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -61,7 +61,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); * Pointer to RX queue structure. 
* * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) @@ -131,7 +131,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) } DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); assert(ret > 0); - return ret; + return -ret; } /** @@ -849,7 +849,7 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) * Verbs Rx queue object. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) @@ -876,7 +876,7 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) rte_free(rxq_ibv); return 0; } - return EBUSY; + return -EBUSY; } /** @@ -1084,7 +1084,7 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) * TX queue index. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) @@ -1110,7 +1110,7 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) (*priv->rxqs)[idx] = NULL; return 0; } - return EBUSY; + return -EBUSY; } /** @@ -1266,7 +1266,7 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], * Indirection table to release. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int mlx5_priv_ind_table_ibv_release(struct priv *priv, @@ -1286,7 +1286,7 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv, rte_free(ind_tbl); return 0; } - return EBUSY; + return -EBUSY; } /** @@ -1440,7 +1440,7 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, * Pointer to Hash Rx queue to release. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. 
*/ int mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) @@ -1455,7 +1455,7 @@ mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) return 0; } claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table)); - return EBUSY; + return -EBUSY; } /** diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 61c1a4a50..e6d2c98b3 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -22,7 +22,7 @@ * Pointer to private structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int priv_socket_init(struct priv *priv) @@ -40,40 +40,49 @@ priv_socket_init(struct priv *priv) */ ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { + ret = errno; WARN("secondary process not supported: %s", strerror(errno)); - return ret; + return -ret; } priv->primary_socket = ret; flags = fcntl(priv->primary_socket, F_GETFL, 0); - if (flags == -1) + if (flags == -1) { + ret = errno; goto out; + } ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK); - if (ret < 0) + if (ret < 0) { + ret = errno; goto out; + } snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = stat(sun.sun_path, &file_stat); - if (!ret) + if (!ret) { + ret = errno; claim_zero(remove(sun.sun_path)); + } ret = bind(priv->primary_socket, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { + ret = errno; WARN("cannot bind socket, secondary process not supported: %s", strerror(errno)); goto close; } ret = listen(priv->primary_socket, 0); if (ret < 0) { + ret = errno; WARN("Secondary process not supported: %s", strerror(errno)); goto close; } - return ret; + return 0; close: remove(sun.sun_path); out: claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; - return -(ret); + return -ret; } /** @@ -83,7 +92,7 @@ priv_socket_init(struct priv *priv) * Pointer to private structure. 
* * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int priv_socket_uninit(struct priv *priv) @@ -191,7 +200,7 @@ priv_socket_connect(struct priv *priv) struct sockaddr_un sun = { .sun_family = AF_UNIX, }; - int socket_fd; + int socket_fd = -1; int *fd = NULL; int ret; struct ucred *cred; @@ -211,19 +220,22 @@ priv_socket_connect(struct priv *priv) ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { + ret = errno; WARN("cannot connect to primary"); - return ret; + goto out; } socket_fd = ret; snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { + ret = errno; WARN("cannot connect to primary"); goto out; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { + ret = EINVAL; DEBUG("cannot get first message"); goto out; } @@ -232,6 +244,7 @@ priv_socket_connect(struct priv *priv) cmsg->cmsg_len = CMSG_LEN(sizeof(*cred)); cred = (struct ucred *)CMSG_DATA(cmsg); if (cred == NULL) { + ret = EINVAL; DEBUG("no credentials received"); goto out; } @@ -240,17 +253,20 @@ priv_socket_connect(struct priv *priv) cred->gid = getgid(); ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT); if (ret < 0) { + ret = errno; WARN("cannot send credentials to primary: %s", strerror(errno)); goto out; } ret = recvmsg(socket_fd, &msg, MSG_WAITALL); if (ret <= 0) { + ret = errno; WARN("no message from primary: %s", strerror(errno)); goto out; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { + ret = EINVAL; WARN("No file descriptor received"); goto out; } @@ -262,6 +278,7 @@ priv_socket_connect(struct priv *priv) } ret = *fd; out: - close(socket_fd); - return ret; + if (socket_fd > 0) + close(socket_fd); + return -ret; } diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index a70b13d52..2918ba0e9 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -36,7 
+36,7 @@ priv_txq_stop(struct priv *priv) * Pointer to private structure. * * @return - * 0 on success, errno on error. + * 0 on success, negative errno on error. */ static int priv_txq_start(struct priv *priv) @@ -60,12 +60,12 @@ priv_txq_start(struct priv *priv) txq_alloc_elts(txq_ctrl); txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i); if (!txq_ctrl->ibv) { - ret = ENOMEM; + ret = -ENOMEM; goto error; } } ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd); - if (ret) + if (!ret) goto error; return ret; error: @@ -95,7 +95,7 @@ priv_rxq_stop(struct priv *priv) * Pointer to private structure. * * @return - * 0 on success, errno on error. + * 0 on success, negative errno on error. */ static int priv_rxq_start(struct priv *priv) @@ -113,14 +113,14 @@ priv_rxq_start(struct priv *priv) goto error; rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i); if (!rxq_ctrl->ibv) { - ret = ENOMEM; + ret = -ENOMEM; goto error; } } - return -ret; + return ret; error: priv_rxq_stop(priv); - return -ret; + return ret; } /** @@ -239,7 +239,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success. + * 0 on success, negative errno on error. */ int priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) @@ -354,7 +354,7 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) } return 0; error: - return rte_errno; + return -rte_errno; } @@ -386,7 +386,7 @@ priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success. + * 0 on success, negative errno on error. */ int priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) @@ -405,15 +405,16 @@ priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success. + * 0 on success, negative errno on error. 
*/ int mlx5_traffic_restart(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; + int ret; priv_lock(priv); - priv_dev_traffic_restart(priv, dev); + ret = priv_dev_traffic_restart(priv, dev); priv_unlock(priv); - return 0; + return ret; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index ed1c713ea..158394168 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -271,7 +271,7 @@ mlx5_tx_queue_release(void *dpdk_txq) * Verbs file descriptor to map UAR pages. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ int priv_tx_uar_remap(struct priv *priv, int fd) @@ -287,7 +287,6 @@ priv_tx_uar_remap(struct priv *priv, int fd) struct mlx5_txq_ctrl *txq_ctrl; int already_mapped; size_t page_size = sysconf(_SC_PAGESIZE); - int r; memset(pages, 0, priv->txqs_n * sizeof(uintptr_t)); /* @@ -326,8 +325,7 @@ priv_tx_uar_remap(struct priv *priv, int fd) /* fixed mmap have to return same address */ ERROR("call to mmap failed on UAR for txq %d\n", i); - r = ENXIO; - return r; + return -ENXIO; } } if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */ @@ -575,7 +573,7 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) * Verbs Tx queue object. * * @return - * 0 on success, errno on failure. + * 0 on success, negative errno on failure. */ int mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) @@ -591,7 +589,7 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) rte_free(txq_ibv); return 0; } - return EBUSY; + return -EBUSY; } /** @@ -830,7 +828,7 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) * TX queue index. * * @return - * 0 on success, errno on failure. + * 0 on success, negative errno on failure. 
*/ int mlx5_priv_txq_release(struct priv *priv, uint16_t idx) @@ -867,7 +865,7 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) (*priv->txqs)[idx] = NULL; return 0; } - return EBUSY; + return -EBUSY; } /** diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 75c345626..2356bc0bb 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -80,7 +80,7 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) ++priv->vlan_filter_n; } if (dev->data->dev_started) - priv_dev_traffic_restart(priv, dev); + ret = priv_dev_traffic_restart(priv, dev); out: priv_unlock(priv); return ret; -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 2/3] net/mlx5: convert return errno to negative ones 2018-02-15 9:29 ` [PATCH 2/3] net/mlx5: convert return errno to negative ones Nelio Laranjeiro @ 2018-02-16 14:26 ` Adrien Mazarguil 0 siblings, 0 replies; 30+ messages in thread From: Adrien Mazarguil @ 2018-02-16 14:26 UTC (permalink / raw) To: Nelio Laranjeiro; +Cc: dev, Yongseok Koh How about reusing the title/commit log of its mlx4 counterpart: net/mlx5: standardize on negative errno values (see 9d14b27308a0 "net/mlx4: standardize on negative errno values") More below. On Thu, Feb 15, 2018 at 10:29:26AM +0100, Nelio Laranjeiro wrote: > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> > Acked-by: Yongseok Koh <yskoh@mellanox.com> > --- > drivers/net/mlx5/mlx5.c | 19 ++++----- > drivers/net/mlx5/mlx5_ethdev.c | 30 ++++++++------ > drivers/net/mlx5/mlx5_flow.c | 92 +++++++++++++++++++++-------------------- > drivers/net/mlx5/mlx5_mac.c | 7 ++-- > drivers/net/mlx5/mlx5_mr.c | 4 +- > drivers/net/mlx5/mlx5_rss.c | 16 +++---- > drivers/net/mlx5/mlx5_rxq.c | 20 ++++----- > drivers/net/mlx5/mlx5_socket.c | 41 ++++++++++++------ > drivers/net/mlx5/mlx5_trigger.c | 27 ++++++------ > drivers/net/mlx5/mlx5_txq.c | 14 +++---- > drivers/net/mlx5/mlx5_vlan.c | 2 +- > 11 files changed, 145 insertions(+), 127 deletions(-) > > diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c > index f52edf74f..d24f2a37c 100644 > --- a/drivers/net/mlx5/mlx5.c > +++ b/drivers/net/mlx5/mlx5.c > @@ -413,7 +413,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) > * Device arguments structure. > * > * @return > - * 0 on success, errno value on failure. > + * 0 on success, negative errno value on failure. How about s/on \(failure\|error\)/otherwise/ here and everywhere else? Documentation should be identical for all relevant functions. <snip> In mlx5_ethdev.c, priv_get_ifname() is still documented to return "0 on success, -1 on failure and errno is set". 
You must get rid of the reliance on external errno as part of this commit; you can optionally set rte_errno, but for consistency all int-returning functions must return a valid negative errno value, never -1. The same applies to: - priv_sysfs_read - priv_sysfs_write - priv_get_sysfs_ulong - priv_set_sysfs_ulong - priv_ifreq - priv_get_num_vfs - priv_get_mtu - priv_get_cntr_sysfs - priv_set_mtu - priv_set_flags - mlx5_link_update (lacks documentation) - mlx5_ibv_device_to_pci_addr - priv_dev_set_link - mlx5_flow_item_validate - mlx5_flow_create_* (unsure) - priv_rx_intr_vec_enable ("negative" what?) - mlx5_rx_intr_enable (ditto) - mlx5_rx_intr_disable (ditto) - check_cqe (returning a valid errno wouldn't impact performance) - priv_read_dev_counters ("negative" what?) - priv_ethtool_get_stats_n - priv_xstats_get ("negative" what?) - mlx5_stats_get (lacks documentation) - mlx5_xstats_get ("negative" what?) - mlx5_xstats_get_names (lacks documentation) - priv_dev_traffic_disable (no error defined?) - mlx5_priv_txq_ibv_releasable (lacks documentation) - mlx5_priv_txq_ibv_verify (ditto) - mlx5_vlan_offload_set (ditto, to be checked) Also, some of them additionally set rte_errno while most of them do not. I'd suggest to *always* set rte_errno in case of error then add the "...and rte_errno is set" to documentation. mlx4 approach to errors: if (boom) { rte_errno = ECRAP; return -rte_errno; } Shorter, also valid but frowned upon: if (boom) return -(rte_errno = EBOOM); Alternatively when calling another rte_errno-aware function: ret = boom(); if (ret) return ret; -- Adrien Mazarguil 6WIND ^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 3/3] net/mlx5: fix traffic restart function to return errors 2018-02-15 9:29 [PATCH 1/3] net/mlx5: add missing function documentation Nelio Laranjeiro 2018-02-15 9:29 ` [PATCH 2/3] net/mlx5: convert return errno to negative ones Nelio Laranjeiro @ 2018-02-15 9:29 ` Nelio Laranjeiro 2018-02-16 14:26 ` Adrien Mazarguil 2018-02-16 14:26 ` [PATCH 1/3] net/mlx5: add missing function documentation Adrien Mazarguil 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro 3 siblings, 1 reply; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-15 9:29 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh, stable priv_dev_traffic_restart() was considering nothing could fail whereas it can. Fixes: 272733b5ebfd ("net/mlx5: use flow to enable unicast traffic") Cc: stable@dpdk.org Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Yongseok Koh <yskoh@mellanox.com> --- drivers/net/mlx5/mlx5_trigger.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 2918ba0e9..2895e57e7 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -391,11 +391,15 @@ priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev) int priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) { + int ret = 0; + if (dev->data->dev_started) { - priv_dev_traffic_disable(priv, dev); - priv_dev_traffic_enable(priv, dev); + ret = priv_dev_traffic_disable(priv, dev); + if (ret) + return ret; + ret = priv_dev_traffic_enable(priv, dev); } - return 0; + return ret; } /** -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 3/3] net/mlx5: fix traffic restart function to return errors 2018-02-15 9:29 ` [PATCH 3/3] net/mlx5: fix traffic restart function to return errors Nelio Laranjeiro @ 2018-02-16 14:26 ` Adrien Mazarguil 0 siblings, 0 replies; 30+ messages in thread From: Adrien Mazarguil @ 2018-02-16 14:26 UTC (permalink / raw) To: Nelio Laranjeiro; +Cc: dev, Yongseok Koh, stable On Thu, Feb 15, 2018 at 10:29:27AM +0100, Nelio Laranjeiro wrote: > priv_dev_traffic_restart() was considering nothing could fail whereas it > can. How about simply describing it as: priv_dev_traffic_restart() must not ignore errors. > Fixes: 272733b5ebfd ("net/mlx5: use flow to enable unicast traffic") > Cc: stable@dpdk.org > > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> > Acked-by: Yongseok Koh <yskoh@mellanox.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> -- Adrien Mazarguil 6WIND ^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 1/3] net/mlx5: add missing function documentation 2018-02-15 9:29 [PATCH 1/3] net/mlx5: add missing function documentation Nelio Laranjeiro 2018-02-15 9:29 ` [PATCH 2/3] net/mlx5: convert return errno to negative ones Nelio Laranjeiro 2018-02-15 9:29 ` [PATCH 3/3] net/mlx5: fix traffic restart function to return errors Nelio Laranjeiro @ 2018-02-16 14:26 ` Adrien Mazarguil 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro 3 siblings, 0 replies; 30+ messages in thread From: Adrien Mazarguil @ 2018-02-16 14:26 UTC (permalink / raw) To: Nelio Laranjeiro; +Cc: dev, Yongseok Koh I hate to sound pedantic but... On Thu, Feb 15, 2018 at 10:29:25AM +0100, Nelio Laranjeiro wrote: > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> > Acked-by: Yongseok Koh <yskoh@mellanox.com> > --- > drivers/net/mlx5/mlx5_trigger.c | 30 ++++++++++++++++++++++++++++++ > 1 file changed, 30 insertions(+) > > diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c > index f5711a998..a70b13d52 100644 > --- a/drivers/net/mlx5/mlx5_trigger.c > +++ b/drivers/net/mlx5/mlx5_trigger.c > @@ -14,6 +14,12 @@ > #include "mlx5_rxtx.h" > #include "mlx5_utils.h" > > +/** > + * Stop traffic on Tx queues. > + * > + * @param priv ...there's an extra space between @param and priv :) > + * Pointer to private structure. > + */ > static void > priv_txq_stop(struct priv *priv) > { > @@ -23,6 +29,15 @@ priv_txq_stop(struct priv *priv) > mlx5_priv_txq_release(priv, i); > } > > +/** > + * Start traffic on Tx queues. > + * > + * @param priv Ditto. > + * Pointer to private structure. > + * > + * @return > + * 0 on success, errno on error. This should more accurately read "0 on success, positive errno value otherwise". I would suggest to document these functions only after the errno mess is sorted (patch 2/3 should come first) for a smaller amount of changes. 
> + */ > static int > priv_txq_start(struct priv *priv) > { > @@ -58,6 +73,12 @@ priv_txq_start(struct priv *priv) > return ret; > } > > +/** > + * Stop traffic on Rx queues. > + * > + * @param priv Extra space here also. > + * Pointer to private structure. > + */ > static void > priv_rxq_stop(struct priv *priv) > { > @@ -67,6 +88,15 @@ priv_rxq_stop(struct priv *priv) > mlx5_priv_rxq_release(priv, i); > } > > +/** > + * Start traffic on Rx queues. > + * > + * @param priv Again. > + * Pointer to private structure. > + * > + * @return > + * 0 on success, errno on error. Ditto regarding errno sign. > + */ > static int > priv_rxq_start(struct priv *priv) > { > -- > 2.11.0 > There are other functions are not properly documented either, you might want to add them for consistency: mlx5_ethdev.c: - mlx5_dev_supported_ptypes_get (no documentation) - mlx5_link_update_unlocked_gset (return value) - mlx5_link_update_unlocked_gs (return value) - priv_link_update (return value) - mlx5_link_update (return value) mlx5_flow.c: - mlx5_flow_create_eth (return value) - mlx5_flow_create_vlan (return value) - mlx5_flow_create_ipv4 (return value) - mlx5_flow_create_ipv6 (return value) - mlx5_flow_create_udp (return value) - mlx5_flow_create_tcp (return value) - mlx5_flow_create_vxlan (return value) - mlx5_flow_create_flag_mark (return value) - priv_flow_create_drop_queue (return in case of error) - priv_flow_verify (inconsistent @return syntax) mlx5_glue.c: - mlx5_glue_* (too many of them and those are basic wrappers, don't bother) mlx5_mr.c: - priv_mr_verify (inconsistent @return syntax) mlx5_rxq.c: - mlx5_priv_rxq_ibv_verify (ditto) - mlx5_priv_rxq_verify (ditto) - mlx5_priv_ind_table_ibv_verify (ditto) - mlx5_priv_hrxq_ibv_verify (ditto) mlx5_rxtx.c: - mlx5_tx_burst_raw_vec - mlx5_tx_burst_vec - mlx5_rx_burst_vec - priv_check_raw_vec_tx_support - priv_check_vec_tx_support - rxq_check_vec_support - priv_check_vec_rx_support mlx5_trigger.c: - Those are taken care of by the current 
patch. mlx5_txq.c: - mlx5_priv_txq_ibv_verify (inconsistent @return syntax) - mlx5_priv_txq_verify (ditto) mlx5_vlan.c - mlx5_vlan_offload_set (return value) I think that's all. -- Adrien Mazarguil 6WIND ^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH v2 00/10] net/mlx5: clean driver 2018-02-15 9:29 [PATCH 1/3] net/mlx5: add missing function documentation Nelio Laranjeiro ` (2 preceding siblings ...) 2018-02-16 14:26 ` [PATCH 1/3] net/mlx5: add missing function documentation Adrien Mazarguil @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro ` (10 more replies) 3 siblings, 11 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh - Removes unused SR-IOV flag. - Adds missing documentation on some functions. - Removes the spin-lock on the private structure. - Standardize the return values of all functions as discussed on the mailing list [1]. [1] https://dpdk.org/ml/archives/dev/2018-January/087991.html Nelio Laranjeiro (10): net/mlx5: fix sriov flag net/mlx5: name parameters in function prototypes net/mlx5: mark parameters with unused attribute net/mlx5: normalize function prototypes net/mlx5: add missing function documentation net/mlx5: remove useless empty lines net/mlx5: remove control path locks net/mlx5: prefix all function with mlx5 net/mlx5: change non failing function return values net/mlx5: standardize on negative errno values drivers/net/mlx5/mlx5.c | 234 ++++++-------- drivers/net/mlx5/mlx5.h | 240 ++++++-------- drivers/net/mlx5/mlx5_ethdev.c | 611 +++++++++++++++-------------------- drivers/net/mlx5/mlx5_flow.c | 664 ++++++++++++++++++++------------------- drivers/net/mlx5/mlx5_mac.c | 42 ++- drivers/net/mlx5/mlx5_mr.c | 130 ++++---- drivers/net/mlx5/mlx5_rss.c | 159 ++++------ drivers/net/mlx5/mlx5_rxmode.c | 28 +- drivers/net/mlx5/mlx5_rxq.c | 488 ++++++++++++++-------------- drivers/net/mlx5/mlx5_rxtx.c | 49 ++- drivers/net/mlx5/mlx5_rxtx.h | 161 +++++----- drivers/net/mlx5/mlx5_rxtx_vec.c | 25 +- drivers/net/mlx5/mlx5_socket.c | 115 ++++--- drivers/net/mlx5/mlx5_stats.c | 189 +++++------ drivers/net/mlx5/mlx5_trigger.c | 234 +++++++------- 
drivers/net/mlx5/mlx5_txq.c | 229 +++++++------- drivers/net/mlx5/mlx5_vlan.c | 93 ++---- 17 files changed, 1761 insertions(+), 1930 deletions(-) -- 2.11.0 ^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH v2 01/10] net/mlx5: fix sriov flag 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 02/10] net/mlx5: name parameters in function prototypes Nelio Laranjeiro ` (9 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh priv_get_num_vfs() was used to help the PMD in prefetching the mbuf in datapath when the PMD was behaving in VF mode. This knowledge is no more used. Fixes: 528a9fbec6de ("net/mlx5: support ConnectX-5 devices") Cc: yskoh@mellanox.com Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 18 ++---------------- drivers/net/mlx5/mlx5.h | 2 -- drivers/net/mlx5/mlx5_ethdev.c | 37 ------------------------------------- 3 files changed, 2 insertions(+), 55 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 0f4b9122f..2cea3dec7 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -578,7 +578,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) int err = 0; struct ibv_context *attr_ctx = NULL; struct ibv_device_attr_ex device_attr; - unsigned int sriov; unsigned int mps; unsigned int cqe_comp; unsigned int tunnel_en = 0; @@ -625,18 +624,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) (pci_dev->addr.devid != pci_addr.devid) || (pci_dev->addr.function != pci_addr.function)) continue; - sriov = ((pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)); - INFO("PCI information matches, using device \"%s\"" - " (SR-IOV: %s)", - 
list[i]->name, - sriov ? "true" : "false"); + INFO("PCI information matches, using device \"%s\"", + list[i]->name); attr_ctx = mlx5_glue->open_device(list[i]); err = errno; break; @@ -709,7 +698,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct rte_eth_dev *eth_dev; struct ibv_device_attr_ex device_attr_ex; struct ether_addr mac; - uint16_t num_vfs = 0; struct ibv_device_attr_ex device_attr; struct mlx5_dev_config config = { .cqe_comp = cqe_comp, @@ -870,8 +858,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) DEBUG("hardware RX end alignment padding is %ssupported", (config.hw_padding ? "" : "not ")); - priv_get_num_vfs(priv, &num_vfs); - config.sriov = (num_vfs || sriov); config.tso = ((device_attr_ex.tso_caps.max_tso > 0) && (device_attr_ex.tso_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET))); diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 9ad0533fc..5e90d99cc 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -78,7 +78,6 @@ struct mlx5_dev_config { unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */ unsigned int hw_padding:1; /* End alignment padding is supported. */ - unsigned int sriov:1; /* This is a VF or PF with VF devices. */ unsigned int mps:2; /* Multi-packet send supported mode. */ unsigned int tunnel_en:1; /* Whether tunnel stateless offloads are supported. 
*/ @@ -209,7 +208,6 @@ struct priv *mlx5_get_priv(struct rte_eth_dev *dev); int mlx5_is_secondary(void); int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); int priv_ifreq(const struct priv *, int req, struct ifreq *); -int priv_get_num_vfs(struct priv *, uint16_t *); int priv_get_mtu(struct priv *, uint16_t *); int priv_set_flags(struct priv *, unsigned int, unsigned int); int mlx5_dev_configure(struct rte_eth_dev *); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index b73cb53df..f98fc4c3b 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -201,43 +201,6 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) } /** - * Return the number of active VFs for the current device. - * - * @param[in] priv - * Pointer to private structure. - * @param[out] num_vfs - * Number of active VFs. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -int -priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs) -{ - /* The sysfs entry name depends on the operating system. */ - const char **name = (const char *[]){ - "sriov_numvfs", - "mlx5_num_vfs", - NULL, - }; - - do { - int n; - FILE *file; - MKSTR(path, "%s/device/%s", priv->ibdev_path, *name); - - file = fopen(path, "rb"); - if (!file) - continue; - n = fscanf(file, "%" SCNu16, num_vfs); - fclose(file); - if (n == 1) - return 0; - } while (*(++name)); - return -1; -} - -/** * Get device MTU. * * @param priv -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 02/10] net/mlx5: name parameters in function prototypes 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 03/10] net/mlx5: mark parameters with unused attribute Nelio Laranjeiro ` (8 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.h | 191 ++++++++++++++++++++++++------------------- drivers/net/mlx5/mlx5_rxtx.h | 162 ++++++++++++++++++++---------------- 2 files changed, 195 insertions(+), 158 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 5e90d99cc..b65962df9 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -206,113 +206,132 @@ int mlx5_getenv_int(const char *); struct priv *mlx5_get_priv(struct rte_eth_dev *dev); int mlx5_is_secondary(void); -int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); -int priv_ifreq(const struct priv *, int req, struct ifreq *); -int priv_get_mtu(struct priv *, uint16_t *); -int priv_set_flags(struct priv *, unsigned int, unsigned int); -int mlx5_dev_configure(struct rte_eth_dev *); -void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *); +int priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]); +int priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr); +int priv_get_mtu(struct priv *priv, uint16_t *mtu); +int priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags); +int mlx5_dev_configure(struct rte_eth_dev *dev); +void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info); const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev 
*dev); -int priv_link_update(struct priv *, int); -int priv_force_link_status_change(struct priv *, int); -int mlx5_link_update(struct rte_eth_dev *, int); -int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t); -int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_ibv_device_to_pci_addr(const struct ibv_device *, - struct rte_pci_addr *); -void mlx5_dev_link_status_handler(void *); -void mlx5_dev_interrupt_handler(void *); -void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *); -void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); +int priv_link_update(struct priv *priv, int wait_to_complete); +int priv_force_link_status_change(struct priv *priv, int status); +int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); +int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, + struct rte_pci_addr *pci_addr); +void mlx5_dev_link_status_handler(void *arg); +void mlx5_dev_interrupt_handler(void *cb_arg); +void priv_dev_interrupt_handler_uninstall(struct priv *priv, + struct rte_eth_dev *dev); +void priv_dev_interrupt_handler_install(struct priv *priv, + struct rte_eth_dev *dev); int mlx5_set_link_down(struct rte_eth_dev *dev); int mlx5_set_link_up(struct rte_eth_dev *dev); +eth_tx_burst_t priv_select_tx_function(struct priv *priv, + struct rte_eth_dev *dev); +eth_rx_burst_t priv_select_rx_function(struct priv *priv, + struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); -eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *); -eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *); /* mlx5_mac.c */ 
-int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]); -void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t); -int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t, - uint32_t); -void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *); +int priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]); +void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index, uint32_t vmdq); +void mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); /* mlx5_rss.c */ -int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int priv_rss_reta_index_resize(struct priv *, unsigned int); -int mlx5_dev_rss_reta_query(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); -int mlx5_dev_rss_reta_update(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); +int mlx5_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size); +int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); /* mlx5_rxmode.c */ -void mlx5_promiscuous_enable(struct rte_eth_dev *); -void mlx5_promiscuous_disable(struct rte_eth_dev *); -void mlx5_allmulticast_enable(struct rte_eth_dev *); -void mlx5_allmulticast_disable(struct rte_eth_dev *); +void mlx5_promiscuous_enable(struct rte_eth_dev *dev); +void mlx5_promiscuous_disable(struct rte_eth_dev *dev); +void mlx5_allmulticast_enable(struct rte_eth_dev *dev); +void mlx5_allmulticast_disable(struct rte_eth_dev 
*dev); /* mlx5_stats.c */ -void priv_xstats_init(struct priv *); -int mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *); -void mlx5_stats_reset(struct rte_eth_dev *); -int mlx5_xstats_get(struct rte_eth_dev *, - struct rte_eth_xstat *, unsigned int); -void mlx5_xstats_reset(struct rte_eth_dev *); -int mlx5_xstats_get_names(struct rte_eth_dev *, - struct rte_eth_xstat_name *, unsigned int); +void priv_xstats_init(struct priv *priv); +int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +void mlx5_stats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, unsigned int n); +void mlx5_xstats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int n); /* mlx5_vlan.c */ -int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int); -int mlx5_vlan_offload_set(struct rte_eth_dev *, int); -void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int); +int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); /* mlx5_trigger.c */ -int mlx5_dev_start(struct rte_eth_dev *); -void mlx5_dev_stop(struct rte_eth_dev *); -int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *); -int mlx5_traffic_restart(struct rte_eth_dev *); +int mlx5_dev_start(struct rte_eth_dev *dev); +void mlx5_dev_stop(struct rte_eth_dev *dev); +int priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev); +int priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev); +int priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev); +int mlx5_traffic_restart(struct rte_eth_dev *dev); /* 
mlx5_flow.c */ -int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type, - enum rte_filter_op, void *); -int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -struct rte_flow *mlx5_flow_create(struct rte_eth_dev *, - const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *, - struct rte_flow_error *); -void priv_flow_flush(struct priv *, struct mlx5_flows *); -int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *); -int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *, - enum rte_flow_action_type, void *, - struct rte_flow_error *); -int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *); -int priv_flow_start(struct priv *, struct mlx5_flows *); -void priv_flow_stop(struct priv *, struct mlx5_flows *); -int priv_flow_verify(struct priv *); -int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *, struct rte_flow_item_vlan *, - struct rte_flow_item_vlan *); -int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *); -int priv_flow_create_drop_queue(struct priv *); -void priv_flow_delete_drop_queue(struct priv *); +int mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +void priv_flow_flush(struct priv *priv, struct mlx5_flows *list); +int priv_flow_create_drop_queue(struct priv *priv); +void priv_flow_stop(struct priv *priv, struct mlx5_flows *list); +int priv_flow_start(struct priv *priv, struct mlx5_flows *list); +int priv_flow_verify(struct priv *priv); +int priv_flow_create_drop_queue(struct priv *priv); +void 
priv_flow_delete_drop_queue(struct priv *priv); +int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask); +int mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask); +struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error); +int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + enum rte_flow_action_type action, void *data, + struct rte_flow_error *error); +int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, + struct rte_flow_error *error); +int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); /* mlx5_socket.c */ @@ -323,9 +342,9 @@ int priv_socket_connect(struct priv *priv); /* mlx5_mr.c */ -struct mlx5_mr *priv_mr_new(struct priv *, struct rte_mempool *); -struct mlx5_mr *priv_mr_get(struct priv *, struct rte_mempool *); -int priv_mr_release(struct priv *, struct mlx5_mr *); -int priv_mr_verify(struct priv *); +struct mlx5_mr *priv_mr_new(struct priv *priv, struct rte_mempool *mp); +struct mlx5_mr *priv_mr_get(struct priv *priv, struct rte_mempool *mp); +int priv_mr_release(struct priv *priv, struct mlx5_mr *mr); +int priv_mr_verify(struct priv *priv); #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index d7e890558..d0ec9a214 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -210,97 +210,115 @@ struct mlx5_txq_ctrl { extern uint8_t 
rss_hash_default_key[]; extern const size_t rss_hash_default_key_len; -void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *); -int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_rxconf *, struct rte_mempool *); -void mlx5_rx_queue_release(void *); +void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl); +int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +void mlx5_rx_queue_release(void *dpdk_rxq); int priv_rx_intr_vec_enable(struct priv *priv); void priv_rx_intr_vec_disable(struct priv *priv); int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_verify(struct priv *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_rxconf *, - struct rte_mempool *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t); -int mlx5_priv_rxq_release(struct priv *, uint16_t); -int mlx5_priv_rxq_releasable(struct priv *, uint16_t); -int mlx5_priv_rxq_verify(struct priv *); -int rxq_alloc_elts(struct mlx5_rxq_ctrl *); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *, - uint16_t [], - uint16_t); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *, - uint16_t [], - uint16_t); -int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *); -int mlx5_priv_ind_table_ibv_verify(struct priv *); -struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -struct 
mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *); -int mlx5_priv_hrxq_ibv_verify(struct priv *); -uint64_t mlx5_priv_get_rx_port_offloads(struct priv *); -uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *); +struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx); +struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_priv_rxq_ibv_releasable(struct priv *priv, + struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_priv_rxq_ibv_verify(struct priv *priv); +struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, + uint16_t desc, + unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_release(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_verify(struct priv *priv); +int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl); +struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *priv, + uint16_t queues[], + uint16_t queues_n); +struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *priv, + uint16_t queues[], + uint16_t queues_n); +int mlx5_priv_ind_table_ibv_release(struct priv *priv, + struct mlx5_ind_table_ibv *ind_tbl); +int mlx5_priv_ind_table_ibv_verify(struct priv *priv); +struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +int mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq); +int mlx5_priv_hrxq_ibv_verify(struct 
priv *priv); +uint64_t mlx5_priv_get_rx_port_offloads(struct priv *priv); +uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *priv); /* mlx5_txq.c */ -int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_txconf *); -void mlx5_tx_queue_release(void *); +int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); +void mlx5_tx_queue_release(void *dpdk_txq); int priv_tx_uar_remap(struct priv *priv, int fd); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_verify(struct priv *); -struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_txconf *); -struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t); -int mlx5_priv_txq_release(struct priv *, uint16_t); -int mlx5_priv_txq_releasable(struct priv *, uint16_t); -int mlx5_priv_txq_verify(struct priv *); -void txq_alloc_elts(struct mlx5_txq_ctrl *); -uint64_t mlx5_priv_get_tx_port_offloads(struct priv *); +struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx); +struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx); +int mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv); +int mlx5_priv_txq_ibv_releasable(struct priv *priv, + struct mlx5_txq_ibv *txq_ibv); +int mlx5_priv_txq_ibv_verify(struct priv *priv); +struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *priv, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *priv, uint16_t idx); +int mlx5_priv_txq_release(struct priv *priv, uint16_t idx); +int 
mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx); +int mlx5_priv_txq_verify(struct priv *priv); +void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl); +uint64_t mlx5_priv_get_tx_port_offloads(struct priv *priv); /* mlx5_rxtx.c */ extern uint32_t mlx5_ptype_table[]; void mlx5_set_ptype_table(void); -uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); -int mlx5_rx_descriptor_status(void *, uint16_t); -int mlx5_tx_descriptor_status(void *, uint16_t); +uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); +uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset); +int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset); /* Vectorized version of mlx5_rxtx.c */ -int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *); -int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *); -int rxq_check_vec_support(struct mlx5_rxq_data *); -int priv_check_vec_rx_support(struct priv *); -uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t); -uint16_t 
mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t); + +int priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); +int priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); +int rxq_check_vec_support(struct mlx5_rxq_data *rxq); +int priv_check_vec_rx_support(struct priv *priv); +uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); /* mlx5_mr.c */ -void mlx5_mp2mr_iter(struct rte_mempool *, void *); -struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *, - struct rte_mempool *, unsigned int); -struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *, - unsigned int); +void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg); +struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, + struct rte_mempool *mp, unsigned int idx); +struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, + struct rte_mempool *mp, + unsigned int idx); #ifndef NDEBUG /** -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 03/10] net/mlx5: mark parameters with unused attribute 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 02/10] net/mlx5: name parameters in function prototypes Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 04/10] net/mlx5: normalize function prototypes Nelio Laranjeiro ` (7 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Replaces all (void)foo; by __rte_unused macro except when variables are under #if statements. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 4 ++-- drivers/net/mlx5/mlx5_ethdev.c | 18 +++++---------- drivers/net/mlx5/mlx5_flow.c | 25 ++++++++------------ drivers/net/mlx5/mlx5_mac.c | 3 +-- drivers/net/mlx5/mlx5_mr.c | 10 +++----- drivers/net/mlx5/mlx5_rxq.c | 4 ++-- drivers/net/mlx5/mlx5_rxtx.c | 51 +++++++++++++++++------------------------ drivers/net/mlx5/mlx5_stats.c | 2 +- drivers/net/mlx5/mlx5_trigger.c | 4 ++-- drivers/net/mlx5/mlx5_txq.c | 19 +++++++-------- 10 files changed, 55 insertions(+), 85 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 2cea3dec7..15cb461d6 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -571,7 +571,8 @@ priv_uar_init_secondary(struct priv *priv) * 0 on success, negative errno value on failure. 
*/ static int -mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) { struct ibv_device **list; struct ibv_device *ibv_dev; @@ -588,7 +589,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct ibv_counter_set_description cs_desc; #endif - (void)pci_drv; assert(pci_drv == &mlx5_driver); /* Get mlx5_dev[] index. */ idx = mlx5_dev_idx(&pci_dev->addr); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index f98fc4c3b..0c383deba 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -467,11 +467,9 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). */ static int -mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct ethtool_cmd edata = { @@ -483,7 +481,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) /* priv_lock() is not taken to allow concurrent calls. */ - (void)wait_to_complete; if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -533,11 +530,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). 
*/ static int -mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; @@ -545,7 +540,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) struct rte_eth_link dev_link; uint64_t sc; - (void)wait_to_complete; if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -675,7 +669,7 @@ priv_link_stop(struct priv *priv) * Wait for request completion (ignored). */ int -priv_link_update(struct priv *priv, int wait_to_complete) +priv_link_update(struct priv *priv, int wait_to_complete __rte_unused) { struct rte_eth_dev *dev = priv->dev; struct utsname utsname; @@ -687,9 +681,9 @@ priv_link_update(struct priv *priv, int wait_to_complete) sscanf(utsname.release, "%d.%d.%d", &ver[0], &ver[1], &ver[2]) != 3 || KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) - ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete); + ret = mlx5_link_update_unlocked_gset(dev); else - ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete); + ret = mlx5_link_update_unlocked_gs(dev); /* If lsc interrupt is disabled, should always be ready for traffic. */ if (!dev->data->dev_conf.intr_conf.lsc) { priv_link_start(priv); @@ -741,7 +735,7 @@ priv_force_link_status_change(struct priv *priv, int status) * Wait for request completion (ignored). 
*/ int -mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) { struct priv *priv = dev->data->dev_private; int ret; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 42381c578..bb98fb4c5 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -526,7 +526,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * 0 on success, errno value on failure. */ static int -priv_flow_convert_rss_conf(struct priv *priv, +priv_flow_convert_rss_conf(struct priv *priv __rte_unused, struct mlx5_flow_parse *parser, const struct rte_eth_rss_conf *rss_conf) { @@ -535,7 +535,6 @@ priv_flow_convert_rss_conf(struct priv *priv, * priv_flow_convert_actions() to initialize the parser with the * device default RSS configuration. */ - (void)priv; if (rss_conf) { if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) return EINVAL; @@ -568,13 +567,11 @@ priv_flow_convert_rss_conf(struct priv *priv, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_attributes(struct priv *priv, +priv_flow_convert_attributes(struct priv *priv __rte_unused, const struct rte_flow_attr *attr, struct rte_flow_error *error, - struct mlx5_flow_parse *parser) + struct mlx5_flow_parse *parser __rte_unused) { - (void)priv; - (void)parser; if (attr->group) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -779,7 +776,7 @@ priv_flow_convert_actions(struct priv *priv, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -priv_flow_convert_items_validate(struct priv *priv, +priv_flow_convert_items_validate(struct priv *priv __rte_unused, const struct rte_flow_item items[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) @@ -787,7 +784,6 @@ priv_flow_convert_items_validate(struct priv *priv, const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; - (void)priv; /* Initialise the offsets to start after verbs attribute. */ for (i = 0; i != hash_rxq_init_n; ++i) parser->queue[i].offset = sizeof(struct ibv_flow_attr); @@ -871,14 +867,13 @@ priv_flow_convert_items_validate(struct priv *priv, * A verbs flow attribute on success, NULL otherwise. */ static struct ibv_flow_attr* -priv_flow_convert_allocate(struct priv *priv, +priv_flow_convert_allocate(struct priv *priv __rte_unused, unsigned int priority, unsigned int size, struct rte_flow_error *error) { struct ibv_flow_attr *ibv_attr; - (void)priv; ibv_attr = rte_calloc(__func__, 1, size, 0); if (!ibv_attr) { rte_flow_error_set(error, ENOMEM, @@ -900,7 +895,8 @@ priv_flow_convert_allocate(struct priv *priv, * Internal parser structure. */ static void -priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser) +priv_flow_convert_finalise(struct priv *priv __rte_unused, + struct mlx5_flow_parse *parser) { const unsigned int ipv4 = hash_rxq_init[parser->layer].ip_version == MLX5_IPV4; @@ -911,7 +907,6 @@ priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser) const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; unsigned int i; - (void)priv; /* Remove any other flow not matching the pattern. 
*/ if (parser->queues_n == 1) { for (i = 0; i != hash_rxq_init_n; ++i) { @@ -2424,11 +2419,10 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; priv_lock(priv); priv_flow_destroy(priv, &priv->flows, flow); priv_unlock(priv); @@ -2443,11 +2437,10 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, */ int mlx5_flow_flush(struct rte_eth_dev *dev, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; priv_lock(priv); priv_flow_flush(priv, &priv->flows); priv_unlock(priv); diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index e8a8d4594..a529dfeac 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -88,12 +88,11 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) */ int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, - uint32_t index, uint32_t vmdq) + uint32_t index, uint32_t vmdq __rte_unused) { unsigned int i; int ret = 0; - (void)vmdq; assert(index < MLX5_MAX_MAC_ADDRESSES); /* First, make sure this address isn't already configured. */ for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) { diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 857dfcd83..38a8e2f40 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -26,15 +26,12 @@ struct mlx5_check_mempool_data { /* Called by mlx5_check_mempool() when iterating the memory chunks. 
*/ static void -mlx5_check_mempool_cb(struct rte_mempool *mp, +mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused, void *opaque, struct rte_mempool_memhdr *memhdr, - unsigned int mem_idx) + unsigned int mem_idx __rte_unused) { struct mlx5_check_mempool_data *data = opaque; - (void)mp; - (void)mem_idx; - /* It already failed, skip the next chunks. */ if (data->ret != 0) return; @@ -336,9 +333,8 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp) * 0 on success, errno on failure. */ int -priv_mr_release(struct priv *priv, struct mlx5_mr *mr) +priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) { - (void)priv; assert(mr); DEBUG("Memory Region %p refcnt: %d", (void *)mr, rte_atomic32_read(&mr->refcnt)); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 238fa7e56..8b9cc1dd0 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -910,9 +910,9 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv) * Verbs Rx queue object. */ int -mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, + struct mlx5_rxq_ibv *rxq_ibv) { - (void)priv; assert(rxq_ibv); return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); } diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 049f7e6c1..93d794ede 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -1899,11 +1899,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * Number of packets successfully transmitted (<= pkts_n). 
*/ uint16_t -removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +removed_tx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } @@ -1924,11 +1923,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) * Number of packets successfully received (<= pkts_n). */ uint16_t -removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +removed_rx_burst(void *dpdk_rxq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_rxq; - (void)pkts; - (void)pkts_n; return 0; } @@ -1940,58 +1938,51 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) */ uint16_t __attribute__((weak)) -mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } uint16_t __attribute__((weak)) -mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_tx_burst_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } uint16_t __attribute__((weak)) -mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_rxq; - (void)pkts; - (void)pkts_n; return 0; } int __attribute__((weak)) -priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +priv_check_raw_vec_tx_support(struct priv *priv __rte_unused, + struct rte_eth_dev *dev __rte_unused) { - (void)priv; - (void)dev; return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_tx_support(struct priv *priv, struct 
rte_eth_dev *dev) +priv_check_vec_tx_support(struct priv *priv __rte_unused, + struct rte_eth_dev *dev __rte_unused) { - (void)priv; - (void)dev; return -ENOTSUP; } int __attribute__((weak)) -rxq_check_vec_support(struct mlx5_rxq_data *rxq) +rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) { - (void)rxq; return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_rx_support(struct priv *priv) +priv_check_vec_rx_support(struct priv *priv __rte_unused) { - (void)priv; return -ENOTSUP; } diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index eb9c65dcc..167e40548 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -488,7 +488,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) * Number of xstats names. */ int -mlx5_xstats_get_names(struct rte_eth_dev *dev, +mlx5_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned int n) { struct priv *priv = dev->data->dev_private; diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index f5711a998..72e8ff644 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -340,9 +340,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) * 0 on success. */ int -priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev) +priv_dev_traffic_disable(struct priv *priv, + struct rte_eth_dev *dev __rte_unused) { - (void)dev; priv_flow_flush(priv, &priv->ctrl_flows); return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index ed1c713ea..071d88a1f 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -578,9 +578,9 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) * 0 on success, errno on failure. 
*/ int -mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused, + struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); @@ -603,9 +603,9 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) * Verbs Tx queue object. */ int -mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, + struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); return (rte_atomic32_read(&txq_ibv->refcnt) == 1); } @@ -806,13 +806,10 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) mlx5_priv_txq_ibv_get(priv, idx); for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { - struct mlx5_mr *mr = NULL; - - (void)mr; - if (ctrl->txq.mp2mr[i]) { - mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp); - assert(mr); - } + if (ctrl->txq.mp2mr[i]) + claim_nonzero + (priv_mr_get(priv, + ctrl->txq.mp2mr[i]->mp)); } rte_atomic32_inc(&ctrl->refcnt); DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 04/10] net/mlx5: normalize function prototypes 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (2 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 03/10] net/mlx5: mark parameters with unused attribute Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 05/10] net/mlx5: add missing function documentation Nelio Laranjeiro ` (6 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5_flow.c | 2 +- drivers/net/mlx5/mlx5_mr.c | 11 ++++++----- drivers/net/mlx5/mlx5_rxq.c | 16 ++++++++-------- drivers/net/mlx5/mlx5_txq.c | 8 ++++---- 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index bb98fb4c5..d8d124749 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -866,7 +866,7 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused, * @return * A verbs flow attribute on success, NULL otherwise. */ -static struct ibv_flow_attr* +static struct ibv_flow_attr * priv_flow_convert_allocate(struct priv *priv __rte_unused, unsigned int priority, unsigned int size, diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 38a8e2f40..4e1495800 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -66,8 +66,9 @@ mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused, * @return * 0 on success (mempool is virtually contiguous), -1 on error. 
*/ -static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, - uintptr_t *end) +static int +mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, + uintptr_t *end) { struct mlx5_check_mempool_data data; @@ -97,7 +98,7 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * @return * mr on success, NULL on failure. */ -struct mlx5_mr* +struct mlx5_mr * priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, struct rte_mempool *mp, unsigned int idx) { @@ -244,7 +245,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * @return * The memory region on success. */ -struct mlx5_mr* +struct mlx5_mr * priv_mr_new(struct priv *priv, struct rte_mempool *mp) { const struct rte_memseg *ms = rte_eal_get_physmem_layout(); @@ -304,7 +305,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) * @return * The memory region on success. */ -struct mlx5_mr* +struct mlx5_mr * priv_mr_get(struct priv *priv, struct rte_mempool *mp) { struct mlx5_mr *mr; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 8b9cc1dd0..2fc6e08aa 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -601,7 +601,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * @return * The Verbs object initialised if it can be created. */ -struct mlx5_rxq_ibv* +struct mlx5_rxq_ibv * mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) { struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; @@ -819,7 +819,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) * @return * The Verbs object if it exists. */ -struct mlx5_rxq_ibv* +struct mlx5_rxq_ibv * mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) { struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; @@ -932,7 +932,7 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, * @return * A DPDK queue object on success. 
*/ -struct mlx5_rxq_ctrl* +struct mlx5_rxq_ctrl * mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp) @@ -1057,7 +1057,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, * @return * A pointer to the queue if it exists. */ -struct mlx5_rxq_ctrl* +struct mlx5_rxq_ctrl * mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) { struct mlx5_rxq_ctrl *rxq_ctrl = NULL; @@ -1170,7 +1170,7 @@ mlx5_priv_rxq_verify(struct priv *priv) * @return * A new indirection table. */ -struct mlx5_ind_table_ibv* +struct mlx5_ind_table_ibv * mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], uint16_t queues_n) { @@ -1232,7 +1232,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], * @return * An indirection table if found. */ -struct mlx5_ind_table_ibv* +struct mlx5_ind_table_ibv * mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], uint16_t queues_n) { @@ -1331,7 +1331,7 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) * @return * An hash Rx queue on success. */ -struct mlx5_hrxq* +struct mlx5_hrxq * mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { @@ -1400,7 +1400,7 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, * @return * An hash Rx queue on success. */ -struct mlx5_hrxq* +struct mlx5_hrxq * mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 071d88a1f..1a508a488 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -369,7 +369,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) * @return * The Verbs object initialised if it can be created. 
*/ -struct mlx5_txq_ibv* +struct mlx5_txq_ibv * mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) { struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; @@ -547,7 +547,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) * @return * The Verbs object if it exists. */ -struct mlx5_txq_ibv* +struct mlx5_txq_ibv * mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) { struct mlx5_txq_ctrl *txq_ctrl; @@ -749,7 +749,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * @return * A DPDK queue object on success. */ -struct mlx5_txq_ctrl* +struct mlx5_txq_ctrl * mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_txconf *conf) @@ -794,7 +794,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, * @return * A pointer to the queue if it exists. */ -struct mlx5_txq_ctrl* +struct mlx5_txq_ctrl * mlx5_priv_txq_get(struct priv *priv, uint16_t idx) { struct mlx5_txq_ctrl *ctrl = NULL; -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 05/10] net/mlx5: add missing function documentation 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (3 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 04/10] net/mlx5: normalize function prototypes Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 06/10] net/mlx5: remove useless empty lines Nelio Laranjeiro ` (5 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5_ethdev.c | 18 ++++++++++++++++++ drivers/net/mlx5/mlx5_mr.c | 7 +++++-- drivers/net/mlx5/mlx5_rxq.c | 20 ++++++++++++-------- drivers/net/mlx5/mlx5_trigger.c | 30 ++++++++++++++++++++++++++++++ drivers/net/mlx5/mlx5_txq.c | 10 ++++++---- 5 files changed, 71 insertions(+), 14 deletions(-) diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 0c383deba..9bbf1eb7d 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -435,6 +435,15 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) priv_unlock(priv); } +/** + * Get supported packet types. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * A pointer to the supported Packet types array. + */ const uint32_t * mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) { @@ -467,6 +476,9 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. + * + * @return + * 0 on success, -1 on error. */ static int mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) @@ -530,6 +542,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. + * + * @return + * 0 on success, -1 on error. 
*/ static int mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) @@ -733,6 +748,9 @@ priv_force_link_status_change(struct priv *priv, int status) * Pointer to Ethernet device structure. * @param wait_to_complete * Wait for request completion (ignored). + * + * @return + * 0 on success, -1 on error. */ int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 4e1495800..8748ddcf5 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -242,6 +242,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * Pointer to private structure. * @param mp * Pointer to the memory pool to register. + * * @return * The memory region on success. */ @@ -302,6 +303,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) * Pointer to private structure. * @param mp * Pointer to the memory pool to register. + * * @return * The memory region on success. */ @@ -352,9 +354,10 @@ priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) * Verify the flow list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int priv_mr_verify(struct priv *priv) diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 2fc6e08aa..6924202cc 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -883,9 +883,10 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) * Verify the Verbs Rx queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. 
 */ int mlx5_priv_rxq_ibv_verify(struct priv *priv) @@ -1139,9 +1140,10 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) * Verify the Rx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_rxq_verify(struct priv *priv) @@ -1293,9 +1295,10 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv, * Verify the Rx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_ind_table_ibv_verify(struct priv *priv) @@ -1462,9 +1465,10 @@ mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) * Verify the Rx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_hrxq_ibv_verify(struct priv *priv) diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 72e8ff644..b147fb4f8 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -14,6 +14,12 @@ #include "mlx5_rxtx.h" #include "mlx5_utils.h" +/** + * Stop traffic on Tx queues. + * + * @param priv + * Pointer to private structure. + */ static void priv_txq_stop(struct priv *priv) { @@ -23,6 +29,15 @@ priv_txq_stop(struct priv *priv) mlx5_priv_txq_release(priv, i); } +/** + * Start traffic on Tx queues. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, errno on error. + */ static int priv_txq_start(struct priv *priv) { @@ -58,6 +73,12 @@ priv_txq_start(struct priv *priv) return ret; } +/** + * Stop traffic on Rx queues. + * + * @param priv + * Pointer to private structure. 
 + */ static void priv_rxq_stop(struct priv *priv) { @@ -67,6 +88,15 @@ priv_rxq_stop(struct priv *priv) mlx5_priv_rxq_release(priv, i); } +/** + * Start traffic on Rx queues. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, errno on error. + */ static int priv_rxq_start(struct priv *priv) { diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 1a508a488..9be707840 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -614,9 +614,10 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, * Verify the Verbs Tx queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_txq_ibv_verify(struct priv *priv) @@ -893,9 +894,10 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) * Verify the Tx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_txq_verify(struct priv *priv) -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 06/10] net/mlx5: remove useless empty lines 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (4 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 05/10] net/mlx5: add missing function documentation Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 07/10] net/mlx5: remove control path locks Nelio Laranjeiro ` (4 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Some empty lines have been added in the middle of the code without any reason. This commit removes them. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 20 -------------------- drivers/net/mlx5/mlx5_ethdev.c | 7 ------- drivers/net/mlx5/mlx5_mr.c | 1 - drivers/net/mlx5/mlx5_rss.c | 2 -- drivers/net/mlx5/mlx5_rxq.c | 1 - drivers/net/mlx5/mlx5_vlan.c | 6 ------ 6 files changed, 37 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 15cb461d6..77b546c75 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -597,7 +597,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return -ENOMEM; } DEBUG("using driver device index %d", idx); - /* Save PCI address. 
*/ mlx5_dev[idx].pci_addr = pci_dev->addr; list = mlx5_glue->get_device_list(&i); @@ -644,7 +643,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return -err; } ibv_dev = list[i]; - DEBUG("device opened"); /* * Multi-packet send is supported by ConnectX-4 Lx PF as well @@ -685,7 +683,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) goto error; INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt); - for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { char name[RTE_ETH_NAME_MAX_LEN]; int len; @@ -716,9 +713,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, pci_dev->addr.devid, pci_dev->addr.function); if (device_attr.orig_attr.phys_port_cnt > 1) snprintf(name + len, sizeof(name), " port %u", i); - mlx5_dev[idx].ports |= test; - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { eth_dev = rte_eth_dev_attach_secondary(name); if (eth_dev == NULL) { @@ -755,15 +750,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, priv_select_tx_function(priv, eth_dev); continue; } - DEBUG("using port %u (%08" PRIx32 ")", port, test); - ctx = mlx5_glue->open_device(ibv_dev); if (ctx == NULL) { err = ENODEV; goto port_error; } - mlx5_glue->query_device_ex(ctx, NULL, &device_attr); /* Check port status. */ err = mlx5_glue->query_port(ctx, port, &port_attr); @@ -771,19 +763,16 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, ERROR("port query failed: %s", strerror(err)); goto port_error; } - if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { ERROR("port %d is not configured in Ethernet mode", port); err = EINVAL; goto port_error; } - if (port_attr.state != IBV_PORT_ACTIVE) DEBUG("port %d is not active: \"%s\" (%d)", port, mlx5_glue->port_state_str(port_attr.state), port_attr.state); - /* Allocate protection domain. 
*/ pd = mlx5_glue->alloc_pd(ctx); if (pd == NULL) { @@ -791,9 +780,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, err = ENOMEM; goto port_error; } - mlx5_dev[idx].ports |= test; - /* from rte_ethdev.c */ priv = rte_zmalloc("ethdev private structure", sizeof(*priv), @@ -803,7 +790,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, err = ENOMEM; goto port_error; } - priv->ctx = ctx; strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path, sizeof(priv->ibdev_path)); @@ -906,7 +892,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, /* Get actual MTU if possible. */ priv_get_mtu(priv, &priv->mtu); DEBUG("port %u MTU is %u", priv->port, priv->mtu); - eth_dev = rte_eth_dev_allocate(name); if (eth_dev == NULL) { ERROR("can not allocate rte ethdev"); @@ -929,7 +914,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); TAILQ_INIT(&priv->flows); TAILQ_INIT(&priv->ctrl_flows); - /* Hint libmlx5 to use PMD allocator for data plane resources */ struct mlx5dv_ctx_allocators alctr = { .alloc = &mlx5_alloc_verbs_buf, @@ -946,7 +930,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, /* Store device configuration on private structure. */ priv->config = config; continue; - port_error: if (priv) rte_free(priv); @@ -956,20 +939,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, claim_zero(mlx5_glue->close_device(ctx)); break; } - /* * XXX if something went wrong in the loop above, there is a resource * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as * long as the dpdk does not provide a way to deallocate a ethdev and a * way to enumerate the registered ethdevs to free the previous ones. 
*/ - /* no port found, complain */ if (!mlx5_dev[idx].ports) { err = ENODEV; goto error; } - error: if (attr_ctx) claim_zero(mlx5_glue->close_device(attr_ctx)); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 9bbf1eb7d..5c43755d0 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -403,7 +403,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) char ifname[IF_NAMESIZE]; info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - priv_lock(priv); /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; @@ -492,7 +491,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) int link_speed = 0; /* priv_lock() is not taken to allow concurrent calls. */ - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -838,7 +836,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) strerror(ret)); goto out; } - fc_conf->autoneg = ethpause.autoneg; if (ethpause.rx_pause && ethpause.tx_pause) fc_conf->mode = RTE_FC_FULL; @@ -849,7 +846,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) else fc_conf->mode = RTE_FC_NONE; ret = 0; - out: priv_unlock(priv); assert(ret >= 0); @@ -890,7 +886,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; @@ -900,7 +895,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) goto out; } ret = 0; - out: priv_unlock(priv); assert(ret >= 0); @@ -1155,7 +1149,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - rc = priv_socket_init(priv); if (!rc && priv->primary_socket) { priv->intr_handle_socket.fd = 
priv->primary_socket; diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 8748ddcf5..6624d0ffa 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -76,7 +76,6 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data); *start = (uintptr_t)data.start; *end = (uintptr_t)data.end; - return data.ret; } diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index d06b0bee1..8f5c8beff 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -123,7 +123,6 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) return ENOMEM; priv->reta_idx = mem; priv->reta_idx_n = reta_size; - if (old_size < reta_size) memset(&(*priv->reta_idx)[old_size], 0, (reta_size - old_size) * @@ -191,7 +190,6 @@ priv_dev_rss_reta_update(struct priv *priv, ret = priv_rss_reta_index_resize(priv, reta_size); if (ret) return ret; - for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; pos = i % RTE_RETA_GROUP_SIZE; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 6924202cc..320a12be9 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1067,7 +1067,6 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_get(priv, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 75c345626..85ed546cb 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -120,14 +120,12 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING, .flags = vlan_offloads, }; - err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); if (err) { ERROR("%p: failed to modified stripping mode: %s", (void *)priv, 
strerror(err)); return; } - /* Update related bits in RX queue. */ rxq->vlan_strip = !!on; } @@ -152,13 +150,11 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ERROR("VLAN stripping is not supported"); return; } - /* Validate queue number */ if (queue >= priv->rxqs_n) { ERROR("VLAN stripping, invalid queue number %d", queue); return; } - priv_lock(priv); priv_vlan_strip_queue_set(priv, queue, on); priv_unlock(priv); @@ -186,13 +182,11 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) ERROR("VLAN stripping is not supported"); return 0; } - /* Run on every RX queue and set/reset VLAN stripping. */ priv_lock(priv); for (i = 0; (i != priv->rxqs_n); i++) priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); priv_unlock(priv); } - return 0; } -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 07/10] net/mlx5: remove control path locks 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (5 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 06/10] net/mlx5: remove useless empty lines Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 08/10] net/mlx5: prefix all function with mlx5 Nelio Laranjeiro ` (3 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh In priv struct only the memory region needs to be protected against concurrent access between the control plane and the data plane. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 2 -- drivers/net/mlx5/mlx5.h | 43 +----------------------------- drivers/net/mlx5/mlx5_ethdev.c | 58 +++-------------------------------------- drivers/net/mlx5/mlx5_flow.c | 18 +------------ drivers/net/mlx5/mlx5_mr.c | 4 +-- drivers/net/mlx5/mlx5_rss.c | 8 ------ drivers/net/mlx5/mlx5_rxq.c | 9 ------- drivers/net/mlx5/mlx5_stats.c | 15 +---------- drivers/net/mlx5/mlx5_trigger.c | 7 ----- drivers/net/mlx5/mlx5_txq.c | 5 ---- drivers/net/mlx5/mlx5_vlan.c | 6 ----- 11 files changed, 9 insertions(+), 166 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 77b546c75..58aa6b29e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -165,7 +165,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) unsigned int i; int ret; - priv_lock(priv); DEBUG("%p: closing device \"%s\"", (void *)dev, ((priv->ctx != NULL) ? 
priv->ctx->device->name : "")); @@ -227,7 +226,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) ret = priv_mr_verify(priv); if (ret) WARN("%p: some Memory Region still remain", (void *)priv); - priv_unlock(priv); memset(priv, 0, sizeof(*priv)); } diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index b65962df9..8e021544c 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -148,7 +148,7 @@ struct priv { LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls; uint32_t link_speed_capa; /* Link speed capabilities. */ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ - rte_spinlock_t lock; /* Lock for control functions. */ + rte_spinlock_t mr_lock; /* MR Lock. */ int primary_socket; /* Unix socket for primary process. */ void *uar_base; /* Reserved address space for UAR mapping */ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */ @@ -157,47 +157,6 @@ struct priv { /* Context for Verbs allocator. */ }; -/** - * Lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. - */ -static inline void -priv_lock(struct priv *priv) -{ - rte_spinlock_lock(&priv->lock); -} - -/** - * Try to lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. - * - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -static inline int -priv_trylock(struct priv *priv) -{ - return rte_spinlock_trylock(&priv->lock); -} - -/** - * Unlock private structure. - * - * @param priv - * Pointer to private structure. 
- */ -static inline void -priv_unlock(struct priv *priv) -{ - rte_spinlock_unlock(&priv->lock); -} - /* mlx5.c */ int mlx5_getenv_int(const char *); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 5c43755d0..f0defc69d 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -269,18 +269,16 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) } /** - * Ethernet device configuration. - * - * Prepare the driver for a given number of TX and RX queues. + * DPDK callback for Ethernet device configuration. * * @param dev * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ -static int -dev_configure(struct rte_eth_dev *dev) +int +mlx5_dev_configure(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; unsigned int rxqs_n = dev->data->nb_rx_queues; @@ -362,28 +360,7 @@ dev_configure(struct rte_eth_dev *dev) j = 0; } return 0; -} - -/** - * DPDK callback for Ethernet device configuration. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative errno value on failure. - */ -int -mlx5_dev_configure(struct rte_eth_dev *dev) -{ - struct priv *priv = dev->data->dev_private; - int ret; - priv_lock(priv); - ret = dev_configure(dev); - assert(ret >= 0); - priv_unlock(priv); - return -ret; } /** @@ -403,7 +380,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) char ifname[IF_NAMESIZE]; info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - priv_lock(priv); /* FIXME: we should ask the device for these values. 
*/ info->min_rx_bufsize = 32; info->max_rx_pktlen = 65536; @@ -431,7 +407,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->hash_key_size = priv->rss_conf.rss_key_len; info->speed_capa = priv->link_speed_capa; info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; - priv_unlock(priv); } /** @@ -490,7 +465,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) struct rte_eth_link dev_link; int link_speed = 0; - /* priv_lock() is not taken to allow concurrent calls. */ if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -756,9 +730,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) struct priv *priv = dev->data->dev_private; int ret; - priv_lock(priv); ret = priv_link_update(priv, wait_to_complete); - priv_unlock(priv); return ret; } @@ -780,7 +752,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) uint16_t kern_mtu; int ret = 0; - priv_lock(priv); ret = priv_get_mtu(priv, &kern_mtu); if (ret) goto out; @@ -795,13 +766,11 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) priv->mtu = mtu; DEBUG("adapter port %u MTU set to %u", priv->port, mtu); } - priv_unlock(priv); return 0; out: ret = errno; WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, strerror(ret)); - priv_unlock(priv); assert(ret >= 0); return -ret; } @@ -828,7 +797,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" @@ -847,7 +815,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) fc_conf->mode = RTE_FC_NONE; ret = 0; out: - priv_unlock(priv); assert(ret >= 0); return -ret; } @@ -886,7 +853,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; 
- priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" @@ -896,7 +862,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) } ret = 0; out: - priv_unlock(priv); assert(ret >= 0); return -ret; } @@ -1039,15 +1004,8 @@ mlx5_dev_link_status_handler(void *arg) struct priv *priv = dev->data->dev_private; int ret; - while (!priv_trylock(priv)) { - /* Alarm is being canceled. */ - if (priv->pending_alarm == 0) - return; - rte_pause(); - } priv->pending_alarm = 0; ret = priv_link_status_update(priv); - priv_unlock(priv); if (!ret) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); } @@ -1067,9 +1025,7 @@ mlx5_dev_interrupt_handler(void *cb_arg) struct priv *priv = dev->data->dev_private; uint32_t events; - priv_lock(priv); events = priv_dev_status_handler(priv); - priv_unlock(priv); if (events & (1 << RTE_ETH_EVENT_INTR_LSC)) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); if (events & (1 << RTE_ETH_EVENT_INTR_RMV)) @@ -1088,9 +1044,7 @@ mlx5_dev_handler_socket(void *cb_arg) struct rte_eth_dev *dev = cb_arg; struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_socket_handle(priv); - priv_unlock(priv); } /** @@ -1190,9 +1144,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; int err; - priv_lock(priv); err = priv_dev_set_link(priv, 0); - priv_unlock(priv); return err; } @@ -1211,9 +1163,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; int err; - priv_lock(priv); err = priv_dev_set_link(priv, 1); - priv_unlock(priv); return err; } diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index d8d124749..137c34988 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -1911,9 +1911,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev, int ret; struct mlx5_flow_parse parser = { .create = 0, }; - priv_lock(priv); 
ret = priv_flow_convert(priv, attr, items, actions, error, &parser); - priv_unlock(priv); return ret; } @@ -1933,10 +1931,8 @@ mlx5_flow_create(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; struct rte_flow *flow; - priv_lock(priv); flow = priv_flow_create(priv, &priv->flows, attr, items, actions, error); - priv_unlock(priv); return flow; } @@ -2423,9 +2419,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_flow_destroy(priv, &priv->flows, flow); - priv_unlock(priv); return 0; } @@ -2441,9 +2435,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_flow_flush(priv, &priv->flows); - priv_unlock(priv); return 0; } @@ -2501,16 +2493,14 @@ priv_flow_query_count(struct ibv_counter_set *cs, * @see rte_flow_ops */ int -mlx5_flow_query(struct rte_eth_dev *dev, +mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow, enum rte_flow_action_type action __rte_unused, void *data, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; int res = EINVAL; - priv_lock(priv); if (flow->cs) { res = priv_flow_query_count(flow->cs, &flow->counter_stats, @@ -2522,7 +2512,6 @@ mlx5_flow_query(struct rte_eth_dev *dev, NULL, "no counter found for flow"); } - priv_unlock(priv); return -res; } #endif @@ -2540,13 +2529,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_lock(priv); if (dev->data->dev_started) { rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "port must be stopped first"); - priv_unlock(priv); return -rte_errno; } priv->isolated = !!enable; @@ -2554,7 +2541,6 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, priv->dev->dev_ops = &mlx5_dev_ops_isolate; else priv->dev->dev_ops = &mlx5_dev_ops; - priv_unlock(priv); return 0; } @@ -3036,9 +3022,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = 
&mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - priv_lock(priv); ret = priv_fdir_ctrl_func(priv, filter_op, arg); - priv_unlock(priv); break; default: ERROR("%p: filter type (%d) not supported", diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 6624d0ffa..3b7b6d140 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -164,9 +164,9 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, container_of(txq, struct mlx5_txq_ctrl, txq); struct mlx5_mr *mr; - priv_lock(txq_ctrl->priv); + rte_spinlock_lock(&txq_ctrl->priv->mr_lock); mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx); - priv_unlock(txq_ctrl->priv); + rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return mr; } diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index 8f5c8beff..4b88215fb 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -44,7 +44,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; int ret = 0; - priv_lock(priv); if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { ret = -EINVAL; goto out; @@ -62,7 +61,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev, } priv->rss_conf.rss_hf = rss_conf->rss_hf; out: - priv_unlock(priv); return ret; } @@ -85,7 +83,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, if (!rss_conf) return -EINVAL; - priv_lock(priv); if (rss_conf->rss_key && (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) { memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, @@ -93,7 +90,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, } rss_conf->rss_key_len = priv->rss_conf.rss_key_len; rss_conf->rss_hf = priv->rss_conf.rss_hf; - priv_unlock(priv); return 0; } @@ -222,9 +218,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, int ret; struct priv *priv = dev->data->dev_private; - priv_lock(priv); ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size); - priv_unlock(priv); return -ret; } @@ -249,9 +243,7 @@ 
mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, int ret; struct priv *priv = dev->data->dev_private; - priv_lock(priv); ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size); - priv_unlock(priv); if (dev->data->dev_started) { mlx5_dev_stop(dev); mlx5_dev_start(dev); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 320a12be9..de3335cb9 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -286,7 +286,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, container_of(rxq, struct mlx5_rxq_ctrl, rxq); int ret = 0; - priv_lock(priv); if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); WARN("%p: increased number of descriptors in RX queue %u" @@ -298,7 +297,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->rxqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->rxqs_n); - priv_unlock(priv); return -EOVERFLOW; } if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) { @@ -329,7 +327,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, (void *)rxq_ctrl); (*priv->rxqs)[idx] = &rxq_ctrl->rxq; out: - priv_unlock(priv); return -ret; } @@ -350,12 +347,10 @@ mlx5_rx_queue_release(void *dpdk_rxq) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - priv_lock(priv); if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) rte_panic("Rx queue %p is still used by a flow and cannot be" " removed\n", (void *)rxq_ctrl); mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); - priv_unlock(priv); } /** @@ -512,7 +507,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { ret = EINVAL; @@ -531,7 +525,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) mlx5_priv_rxq_ibv_release(priv, 
rxq_ibv); } exit: - priv_unlock(priv); if (ret) WARN("unable to arm interrupt on rx queue %d", rx_queue_id); return -ret; @@ -559,7 +552,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) void *ev_ctx; int ret = 0; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { ret = EINVAL; @@ -583,7 +575,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) exit: if (rxq_ibv) mlx5_priv_rxq_ibv_release(priv, rxq_ibv); - priv_unlock(priv); if (ret) WARN("unable to disable interrupt on rx queue %d", rx_queue_id); diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 167e40548..39be1865a 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -328,7 +328,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) unsigned int i; unsigned int idx; - priv_lock(priv); /* Add software counters. */ for (i = 0; (i != priv->rxqs_n); ++i) { struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; @@ -374,7 +373,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* FIXME: retrieve and add hardware counters. */ #endif *stats = tmp; - priv_unlock(priv); return 0; } @@ -391,7 +389,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) unsigned int i; unsigned int idx; - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); ++i) { if ((*priv->rxqs)[i] == NULL) continue; @@ -409,7 +406,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: reset hardware counters. 
*/ #endif - priv_unlock(priv); } /** @@ -436,16 +432,13 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; - priv_lock(priv); stats_n = priv_ethtool_get_stats_n(priv); if (stats_n < 0) { - priv_unlock(priv); return -1; } if (xstats_ctrl->stats_n != stats_n) priv_xstats_init(priv); ret = priv_xstats_get(priv, stats); - priv_unlock(priv); } return ret; } @@ -463,15 +456,12 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; - priv_lock(priv); stats_n = priv_ethtool_get_stats_n(priv); if (stats_n < 0) - goto unlock; + return; if (xstats_ctrl->stats_n != stats_n) priv_xstats_init(priv); priv_xstats_reset(priv); -unlock: - priv_unlock(priv); } /** @@ -491,18 +481,15 @@ int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, struct rte_eth_xstat_name *xstats_names, unsigned int n) { - struct priv *priv = dev->data->dev_private; unsigned int i; if (n >= xstats_n && xstats_names) { - priv_lock(priv); for (i = 0; i != xstats_n; ++i) { strncpy(xstats_names[i].name, mlx5_counters_init[i].dpdk_name, RTE_ETH_XSTATS_NAME_SIZE); xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; } - priv_unlock(priv); } return xstats_n; } diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index b147fb4f8..3ce93910d 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -142,7 +142,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) int err; dev->data->dev_started = 1; - priv_lock(priv); err = priv_flow_create_drop_queue(priv); if (err) { ERROR("%p: Drop queue allocation failed: %s", @@ -180,7 +179,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) goto error; } priv_dev_interrupt_handler_install(priv, dev); - priv_unlock(priv); return 0; error: /* Rollback. 
*/ @@ -192,7 +190,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) priv_txq_stop(priv); priv_rxq_stop(priv); priv_flow_delete_drop_queue(priv); - priv_unlock(priv); return err; } @@ -210,7 +207,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; - priv_lock(priv); dev->data->dev_started = 0; /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; @@ -227,7 +223,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev) for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) priv_mr_release(priv, mr); priv_flow_delete_drop_queue(priv); - priv_unlock(priv); } /** @@ -412,8 +407,6 @@ mlx5_traffic_restart(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_dev_traffic_restart(priv, dev); - priv_unlock(priv); return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 9be707840..47ee95990 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -172,7 +172,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, container_of(txq, struct mlx5_txq_ctrl, txq); int ret = 0; - priv_lock(priv); /* * Don't verify port offloads for application which * use the old API. 
@@ -205,7 +204,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->txqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->txqs_n); - priv_unlock(priv); return -EOVERFLOW; } if (!mlx5_priv_txq_releasable(priv, idx)) { @@ -226,7 +224,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, (void *)txq_ctrl); (*priv->txqs)[idx] = &txq_ctrl->txq; out: - priv_unlock(priv); return -ret; } @@ -248,7 +245,6 @@ mlx5_tx_queue_release(void *dpdk_txq) return; txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); priv = txq_ctrl->priv; - priv_lock(priv); for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { DEBUG("%p: removing TX queue %p from list", @@ -256,7 +252,6 @@ mlx5_tx_queue_release(void *dpdk_txq) mlx5_priv_txq_release(priv, i); break; } - priv_unlock(priv); } diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 85ed546cb..184ae2f4e 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -46,7 +46,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) unsigned int i; int ret = 0; - priv_lock(priv); DEBUG("%p: %s VLAN filter ID %" PRIu16, (void *)dev, (on ? "enable" : "disable"), vlan_id); assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter)); @@ -82,7 +81,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) if (dev->data->dev_started) priv_dev_traffic_restart(priv, dev); out: - priv_unlock(priv); return ret; } @@ -155,9 +153,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ERROR("VLAN stripping, invalid queue number %d", queue); return; } - priv_lock(priv); priv_vlan_strip_queue_set(priv, queue, on); - priv_unlock(priv); } /** @@ -183,10 +179,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) return 0; } /* Run on every RX queue and set/reset VLAN stripping. 
*/ - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); i++) priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); - priv_unlock(priv); } return 0; } -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 08/10] net/mlx5: prefix all function with mlx5 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (6 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 07/10] net/mlx5: remove control path locks Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 09/10] net/mlx5: change non failing function return values Nelio Laranjeiro ` (2 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh This change removes the need to distinguish unlocked priv_*() functions which are therefore renamed using a mlx5_*() prefix for consistency. At the same time, all functions from mlx5 uses a pointer to the ETH device instead of the one to the PMD private data. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 104 ++++++------ drivers/net/mlx5/mlx5.h | 90 +++++------ drivers/net/mlx5/mlx5_ethdev.c | 288 ++++++++++++++------------------- drivers/net/mlx5/mlx5_flow.c | 342 +++++++++++++++++++-------------------- drivers/net/mlx5/mlx5_mac.c | 8 +- drivers/net/mlx5/mlx5_mr.c | 86 ++++------ drivers/net/mlx5/mlx5_rss.c | 107 ++++-------- drivers/net/mlx5/mlx5_rxq.c | 285 ++++++++++++++++---------------- drivers/net/mlx5/mlx5_rxtx.c | 10 +- drivers/net/mlx5/mlx5_rxtx.h | 115 +++++++------ drivers/net/mlx5/mlx5_rxtx_vec.c | 25 ++- drivers/net/mlx5/mlx5_socket.c | 29 ++-- drivers/net/mlx5/mlx5_stats.c | 135 ++++++---------- drivers/net/mlx5/mlx5_trigger.c | 118 ++++++-------- drivers/net/mlx5/mlx5_txq.c | 134 +++++++-------- drivers/net/mlx5/mlx5_vlan.c | 61 +++---- 16 files changed, 868 insertions(+), 1069 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 58aa6b29e..768cb9e5a 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -169,8 +169,8 @@ 
mlx5_dev_close(struct rte_eth_dev *dev) (void *)dev, ((priv->ctx != NULL) ? priv->ctx->device->name : "")); /* In case mlx5_dev_stop() has not been called. */ - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_dev_traffic_disable(priv, dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_traffic_disable(dev); /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; @@ -178,7 +178,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_rx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->rxqs_n); ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); priv->rxqs_n = 0; priv->rxqs = NULL; } @@ -186,7 +186,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_tx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->txqs_n); ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); priv->txqs_n = 0; priv->txqs = NULL; } @@ -201,31 +201,31 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (priv->reta_idx != NULL) rte_free(priv->reta_idx); if (priv->primary_socket) - priv_socket_uninit(priv); - ret = mlx5_priv_hrxq_ibv_verify(priv); + mlx5_socket_uninit(dev); + ret = mlx5_hrxq_ibv_verify(dev); if (ret) - WARN("%p: some Hash Rx queue still remain", (void *)priv); - ret = mlx5_priv_ind_table_ibv_verify(priv); + WARN("%p: some Hash Rx queue still remain", (void *)dev); + ret = mlx5_ind_table_ibv_verify(dev); if (ret) - WARN("%p: some Indirection table still remain", (void *)priv); - ret = mlx5_priv_rxq_ibv_verify(priv); + WARN("%p: some Indirection table still remain", (void *)dev); + ret = mlx5_rxq_ibv_verify(dev); if (ret) - WARN("%p: some Verbs Rx queue still remain", (void *)priv); - ret = mlx5_priv_rxq_verify(priv); + WARN("%p: some Verbs Rx queue still remain", (void *)dev); + ret = mlx5_rxq_verify(dev); if (ret) - WARN("%p: some Rx Queues still remain", (void *)priv); - ret = 
mlx5_priv_txq_ibv_verify(priv); + WARN("%p: some Rx Queues still remain", (void *)dev); + ret = mlx5_txq_ibv_verify(dev); if (ret) - WARN("%p: some Verbs Tx queue still remain", (void *)priv); - ret = mlx5_priv_txq_verify(priv); + WARN("%p: some Verbs Tx queue still remain", (void *)dev); + ret = mlx5_txq_verify(dev); if (ret) - WARN("%p: some Tx Queues still remain", (void *)priv); - ret = priv_flow_verify(priv); + WARN("%p: some Tx Queues still remain", (void *)dev); + ret = mlx5_flow_verify(dev); if (ret) - WARN("%p: some flows still remain", (void *)priv); - ret = priv_mr_verify(priv); + WARN("%p: some flows still remain", (void *)dev); + ret = mlx5_mr_verify(dev); if (ret) - WARN("%p: some Memory Region still remain", (void *)priv); + WARN("%p: some Memory Region still remain", (void *)dev); memset(priv, 0, sizeof(*priv)); } @@ -466,15 +466,16 @@ static void *uar_base; /** * Reserve UAR address space for primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. */ static int -priv_uar_init_primary(struct priv *priv) +mlx5_uar_init_primary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr = (void *)0; int i; const struct rte_mem_config *mcfg; @@ -516,15 +517,16 @@ priv_uar_init_primary(struct priv *priv) * Reserve UAR address space for secondary process, align with * primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. 
*/ static int -priv_uar_init_secondary(struct priv *priv) +mlx5_uar_init_secondary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr; int ret; @@ -690,7 +692,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct ibv_port_attr port_attr; struct ibv_pd *pd = NULL; struct priv *priv = NULL; - struct rte_eth_dev *eth_dev; + struct rte_eth_dev *eth_dev = NULL; struct ibv_device_attr_ex device_attr_ex; struct ether_addr mac; struct ibv_device_attr_ex device_attr; @@ -721,20 +723,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } eth_dev->device = &pci_dev->device; eth_dev->dev_ops = &mlx5_dev_sec_ops; - priv = eth_dev->data->dev_private; - err = priv_uar_init_secondary(priv); + err = mlx5_uar_init_secondary(eth_dev); if (err < 0) { err = -err; goto error; } /* Receive command fd from primary process */ - err = priv_socket_connect(priv); + err = mlx5_socket_connect(eth_dev); if (err < 0) { err = -err; goto error; } /* Remap UAR for Tx queues. */ - err = priv_tx_uar_remap(priv, err); + err = mlx5_tx_uar_remap(eth_dev, err); if (err) goto error; /* @@ -743,9 +744,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, * secondary process. 
*/ eth_dev->rx_pkt_burst = - priv_select_rx_function(priv, eth_dev); + mlx5_select_rx_function(eth_dev); eth_dev->tx_pkt_burst = - priv_select_tx_function(priv, eth_dev); + mlx5_select_tx_function(eth_dev); continue; } DEBUG("using port %u (%08" PRIx32 ")", port, test); @@ -861,11 +862,23 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, WARN("Rx CQE compression isn't supported"); config.cqe_comp = 0; } - err = priv_uar_init_primary(priv); + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + ERROR("can not allocate rte ethdev"); + err = ENOMEM; + goto port_error; + } + eth_dev->data->dev_private = priv; + priv->dev = eth_dev; + eth_dev->data->mac_addrs = priv->mac; + eth_dev->device = &pci_dev->device; + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->device->driver = &mlx5_driver.driver; + err = mlx5_uar_init_primary(eth_dev); if (err) goto port_error; /* Configure the first MAC address by default. */ - if (priv_get_mac(priv, &mac.addr_bytes)) { + if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { ERROR("cannot get MAC address, is mlx5_en loaded?" " (errno: %s)", strerror(errno)); err = ENODEV; @@ -880,7 +893,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, { char ifname[IF_NAMESIZE]; - if (priv_get_ifname(priv, &ifname) == 0) + if (mlx5_get_ifname(eth_dev, &ifname) == 0) DEBUG("port %u ifname is \"%s\"", priv->port, ifname); else @@ -888,25 +901,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } #endif /* Get actual MTU if possible. 
*/ - priv_get_mtu(priv, &priv->mtu); + mlx5_get_mtu(eth_dev, &priv->mtu); DEBUG("port %u MTU is %u", priv->port, priv->mtu); - eth_dev = rte_eth_dev_allocate(name); - if (eth_dev == NULL) { - ERROR("can not allocate rte ethdev"); - err = ENOMEM; - goto port_error; - } - eth_dev->data->dev_private = priv; - eth_dev->data->mac_addrs = priv->mac; - eth_dev->device = &pci_dev->device; - rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->device->driver = &mlx5_driver.driver; /* * Initialize burst functions to prevent crashes before link-up. */ eth_dev->rx_pkt_burst = removed_rx_burst; eth_dev->tx_pkt_burst = removed_tx_burst; - priv->dev = eth_dev; eth_dev->dev_ops = &mlx5_dev_ops; /* Register MAC address. */ claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); @@ -921,10 +922,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, (void *)((uintptr_t)&alctr)); - /* Bring Ethernet device up. */ DEBUG("forcing Ethernet interface up"); - priv_set_flags(priv, ~IFF_UP, IFF_UP); + mlx5_set_flags(eth_dev, ~IFF_UP, IFF_UP); /* Store device configuration on private structure. 
*/ priv->config = config; continue; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 8e021544c..2cb463b62 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -163,18 +163,16 @@ int mlx5_getenv_int(const char *); /* mlx5_ethdev.c */ -struct priv *mlx5_get_priv(struct rte_eth_dev *dev); -int mlx5_is_secondary(void); -int priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]); -int priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr); -int priv_get_mtu(struct priv *priv, uint16_t *mtu); -int priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags); +int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]); +int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr); +int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu); +int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, + unsigned int flags); int mlx5_dev_configure(struct rte_eth_dev *dev); void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info); const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev); -int priv_link_update(struct priv *priv, int wait_to_complete); -int priv_force_link_status_change(struct priv *priv, int status); int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status); int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); @@ -183,22 +181,18 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, struct rte_pci_addr *pci_addr); void mlx5_dev_link_status_handler(void *arg); -void mlx5_dev_interrupt_handler(void *cb_arg); -void priv_dev_interrupt_handler_uninstall(struct priv *priv, - struct rte_eth_dev *dev); -void priv_dev_interrupt_handler_install(struct priv *priv, - struct rte_eth_dev 
*dev); +void mlx5_dev_interrupt_handler(void *arg); +void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev); +void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev); int mlx5_set_link_down(struct rte_eth_dev *dev); int mlx5_set_link_up(struct rte_eth_dev *dev); -eth_tx_burst_t priv_select_tx_function(struct priv *priv, - struct rte_eth_dev *dev); -eth_rx_burst_t priv_select_rx_function(struct priv *priv, - struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); +eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev); +eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev); /* mlx5_mac.c */ -int priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]); +int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]); void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, uint32_t index, uint32_t vmdq); @@ -210,7 +204,7 @@ int mlx5_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); -int priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size); +int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size); int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); @@ -227,13 +221,13 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *dev); /* mlx5_stats.c */ -void priv_xstats_init(struct priv *priv); +void mlx5_xstats_init(struct rte_eth_dev *dev); int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); void mlx5_stats_reset(struct rte_eth_dev *dev); -int mlx5_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstat *stats, unsigned int n); +int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n); void mlx5_xstats_reset(struct rte_eth_dev *dev); -int 
mlx5_xstats_get_names(struct rte_eth_dev *dev, +int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, struct rte_eth_xstat_name *xstats_names, unsigned int n); @@ -247,9 +241,8 @@ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); int mlx5_dev_start(struct rte_eth_dev *dev); void mlx5_dev_stop(struct rte_eth_dev *dev); -int priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev); -int priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev); -int priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev); +int mlx5_traffic_enable(struct rte_eth_dev *dev); +int mlx5_traffic_disable(struct rte_eth_dev *dev); int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ @@ -259,21 +252,6 @@ int mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_item items[], const struct rte_flow_action actions[], struct rte_flow_error *error); -void priv_flow_flush(struct priv *priv, struct mlx5_flows *list); -int priv_flow_create_drop_queue(struct priv *priv); -void priv_flow_stop(struct priv *priv, struct mlx5_flows *list); -int priv_flow_start(struct priv *priv, struct mlx5_flows *list); -int priv_flow_verify(struct priv *priv); -int priv_flow_create_drop_queue(struct priv *priv); -void priv_flow_delete_drop_queue(struct priv *priv); -int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, - struct rte_flow_item_eth *eth_spec, - struct rte_flow_item_eth *eth_mask, - struct rte_flow_item_vlan *vlan_spec, - struct rte_flow_item_vlan *vlan_mask); -int mlx5_ctrl_flow(struct rte_eth_dev *dev, - struct rte_flow_item_eth *eth_spec, - struct rte_flow_item_eth *eth_mask); struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], @@ -281,6 +259,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, struct rte_flow_error *error); int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error); +void 
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list); int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, enum rte_flow_action_type action, void *data, @@ -291,19 +270,32 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg); +int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list); +void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list); +int mlx5_flow_verify(struct rte_eth_dev *dev); +int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask); +int mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask); +int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev); +void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev); /* mlx5_socket.c */ -int priv_socket_init(struct priv *priv); -int priv_socket_uninit(struct priv *priv); -void priv_socket_handle(struct priv *priv); -int priv_socket_connect(struct priv *priv); +int mlx5_socket_init(struct rte_eth_dev *priv); +int mlx5_socket_uninit(struct rte_eth_dev *priv); +void mlx5_socket_handle(struct rte_eth_dev *priv); +int mlx5_socket_connect(struct rte_eth_dev *priv); /* mlx5_mr.c */ -struct mlx5_mr *priv_mr_new(struct priv *priv, struct rte_mempool *mp); -struct mlx5_mr *priv_mr_get(struct priv *priv, struct rte_mempool *mp); -int priv_mr_release(struct priv *priv, struct mlx5_mr *mr); -int priv_mr_verify(struct priv *priv); +struct mlx5_mr *mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp); +struct mlx5_mr *mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp); +int mlx5_mr_release(struct mlx5_mr *mr); +int mlx5_mr_verify(struct rte_eth_dev *dev); #endif /* RTE_PMD_MLX5_H_ */ diff --git 
a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index f0defc69d..1fde3d842 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -95,8 +95,8 @@ struct ethtool_link_settings { /** * Get interface name from private structure. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] ifname * Interface name output buffer. * @@ -104,8 +104,9 @@ struct ethtool_link_settings { * 0 on success, -1 on failure and errno is set. */ int -priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) +mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) { + struct priv *priv = dev->data->dev_private; DIR *dir; struct dirent *dent; unsigned int dev_type = 0; @@ -176,8 +177,8 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) /** * Perform ifreq ioctl() on associated Ethernet device. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param req * Request number to pass to ioctl(). * @param[out] ifr @@ -187,14 +188,14 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) * 0 on success, -1 on failure and errno is set. */ int -priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) +mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) { int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); int ret = -1; if (sock == -1) return ret; - if (priv_get_ifname(priv, &ifr->ifr_name) == 0) + if (mlx5_get_ifname(dev, &ifr->ifr_name) == 0) ret = ioctl(sock, req, ifr); close(sock); return ret; @@ -203,8 +204,8 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) /** * Get device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] mtu * MTU value output buffer. 
* @@ -212,10 +213,10 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) * 0 on success, -1 on failure and errno is set. */ int -priv_get_mtu(struct priv *priv, uint16_t *mtu) +mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) { struct ifreq request; - int ret = priv_ifreq(priv, SIOCGIFMTU, &request); + int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request); if (ret) return ret; @@ -226,8 +227,8 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu) /** * Set device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param mtu * MTU value to set. * @@ -235,18 +236,18 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu) * 0 on success, -1 on failure and errno is set. */ static int -priv_set_mtu(struct priv *priv, uint16_t mtu) +mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct ifreq request = { .ifr_mtu = mtu, }; - return priv_ifreq(priv, SIOCSIFMTU, &request); + return mlx5_ifreq(dev, SIOCSIFMTU, &request); } /** * Set device flags. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param keep * Bitmask for flags that must remain untouched. * @param flags @@ -256,16 +257,16 @@ priv_set_mtu(struct priv *priv, uint16_t mtu) * 0 on success, -1 on failure and errno is set. 
*/ int -priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) +mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) { struct ifreq request; - int ret = priv_ifreq(priv, SIOCGIFFLAGS, &request); + int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request); if (ret) return ret; request.ifr_flags &= keep; request.ifr_flags |= flags & ~keep; - return priv_ifreq(priv, SIOCSIFFLAGS, &request); + return mlx5_ifreq(dev, SIOCSIFFLAGS, &request); } /** @@ -288,11 +289,11 @@ mlx5_dev_configure(struct rte_eth_dev *dev) unsigned int reta_idx_n; const uint8_t use_app_rss_key = !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; - uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv); + uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev); uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; uint64_t supp_rx_offloads = - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv)); + (mlx5_get_rx_port_offloads() | + mlx5_get_rx_queue_offloads(dev)); uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; if ((tx_offloads & supp_tx_offloads) != tx_offloads) { @@ -349,7 +350,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev) reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? 
priv->config.ind_table_max_size : rxqs_n)); - if (priv_rss_reta_index_resize(priv, reta_idx_n)) + if (mlx5_rss_reta_index_resize(dev, reta_idx_n)) return ENOMEM; /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread @@ -395,12 +396,11 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->max_rx_queues = max; info->max_tx_queues = max; info->max_mac_addrs = RTE_DIM(priv->mac); - info->rx_queue_offload_capa = - mlx5_priv_get_rx_queue_offloads(priv); - info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) | + info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev); + info->rx_offload_capa = (mlx5_get_rx_port_offloads() | info->rx_queue_offload_capa); - info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv); - if (priv_get_ifname(priv, &ifname) == 0) + info->tx_offload_capa = mlx5_get_tx_port_offloads(dev); + if (mlx5_get_ifname(dev, &ifname) == 0) info->if_index = if_nametoindex(ifname); info->reta_size = priv->reta_idx_n ? 
priv->reta_idx_n : config->ind_table_max_size; @@ -465,7 +465,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) struct rte_eth_link dev_link; int link_speed = 0; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { + if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; } @@ -473,7 +473,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&edata; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", strerror(errno)); return -1; @@ -527,7 +527,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) struct rte_eth_link dev_link; uint64_t sc; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { + if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; } @@ -535,7 +535,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&gcmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", strerror(errno)); return -1; @@ -549,7 +549,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) *ecmd = gcmd; ifr.ifr_data = (void *)ecmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", strerror(errno)); return -1; @@ -608,90 +608,50 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) /** * Enable receiving and transmitting traffic. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ static void -priv_link_start(struct priv *priv) +mlx5_link_start(struct rte_eth_dev *dev) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; int err; - dev->tx_pkt_burst = priv_select_tx_function(priv, dev); - dev->rx_pkt_burst = priv_select_rx_function(priv, dev); - err = priv_dev_traffic_enable(priv, dev); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + err = mlx5_traffic_enable(dev); if (err) ERROR("%p: error occurred while configuring control flows: %s", - (void *)priv, strerror(err)); - err = priv_flow_start(priv, &priv->flows); + (void *)dev, strerror(err)); + err = mlx5_flow_start(dev, &priv->flows); if (err) ERROR("%p: error occurred while configuring flows: %s", - (void *)priv, strerror(err)); + (void *)dev, strerror(err)); } /** * Disable receiving and transmitting traffic. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. */ static void -priv_link_stop(struct priv *priv) +mlx5_link_stop(struct rte_eth_dev *dev) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; } /** - * Retrieve physical link information and update rx/tx_pkt_burst callbacks - * accordingly. - * - * @param priv - * Pointer to private structure. - * @param wait_to_complete - * Wait for request completion (ignored). 
- */ -int -priv_link_update(struct priv *priv, int wait_to_complete __rte_unused) -{ - struct rte_eth_dev *dev = priv->dev; - struct utsname utsname; - int ver[3]; - int ret; - struct rte_eth_link dev_link = dev->data->dev_link; - - if (uname(&utsname) == -1 || - sscanf(utsname.release, "%d.%d.%d", - &ver[0], &ver[1], &ver[2]) != 3 || - KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) - ret = mlx5_link_update_unlocked_gset(dev); - else - ret = mlx5_link_update_unlocked_gs(dev); - /* If lsc interrupt is disabled, should always be ready for traffic. */ - if (!dev->data->dev_conf.intr_conf.lsc) { - priv_link_start(priv); - return ret; - } - /* Re-select burst callbacks only if link status has been changed. */ - if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { - if (dev->data->dev_link.link_status == ETH_LINK_UP) - priv_link_start(priv); - else - priv_link_stop(priv); - } - return ret; -} - -/** * Querying the link status till it changes to the desired state. * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param status * Link desired status. * @@ -699,13 +659,13 @@ priv_link_update(struct priv *priv, int wait_to_complete __rte_unused) * 0 on success, negative errno value on failure. 
*/ int -priv_force_link_status_change(struct priv *priv, int status) +mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) { int try = 0; while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) { - priv_link_update(priv, 0); - if (priv->dev->data->dev_link.link_status == status) + mlx5_link_update(dev, 0); + if (dev->data->dev_link.link_status == status) return 0; try++; sleep(1); @@ -727,10 +687,30 @@ priv_force_link_status_change(struct priv *priv, int status) int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct utsname utsname; + int ver[3]; int ret; + struct rte_eth_link dev_link = dev->data->dev_link; - ret = priv_link_update(priv, wait_to_complete); + if (uname(&utsname) == -1 || + sscanf(utsname.release, "%d.%d.%d", + &ver[0], &ver[1], &ver[2]) != 3 || + KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) + ret = mlx5_link_update_unlocked_gset(dev); + else + ret = mlx5_link_update_unlocked_gs(dev); + /* If lsc interrupt is disabled, should always be ready for traffic. */ + if (!dev->data->dev_conf.intr_conf.lsc) { + mlx5_link_start(dev); + return ret; + } + /* Re-select burst callbacks only if link status has been changed. */ + if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { + if (dev->data->dev_link.link_status == ETH_LINK_UP) + mlx5_link_start(dev); + else + mlx5_link_stop(dev); + } return ret; } @@ -752,14 +732,14 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) uint16_t kern_mtu; int ret = 0; - ret = priv_get_mtu(priv, &kern_mtu); + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) goto out; /* Set kernel interface MTU first. 
*/ - ret = priv_set_mtu(priv, mtu); + ret = mlx5_set_mtu(dev, mtu); if (ret) goto out; - ret = priv_get_mtu(priv, &kern_mtu); + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) goto out; if (kern_mtu == mtu) { @@ -789,7 +769,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_GPAUSEPARAM @@ -797,10 +776,9 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { ret = errno; - WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" - " failed: %s", + WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s", strerror(ret)); goto out; } @@ -833,7 +811,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_SPAUSEPARAM @@ -853,7 +830,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { ret = errno; WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" " failed: %s", @@ -919,18 +896,19 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, /** * Update the link status. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Zero if the callback process can be called immediately. 
*/ static int -priv_link_status_update(struct priv *priv) +mlx5_link_status_update(struct rte_eth_dev *dev) { - struct rte_eth_link *link = &priv->dev->data->dev_link; + struct priv *priv = dev->data->dev_private; + struct rte_eth_link *link = &dev->data->dev_link; - priv_link_update(priv, 0); + mlx5_link_update(dev, 0); if (((link->link_speed == 0) && link->link_status) || ((link->link_speed != 0) && !link->link_status)) { /* @@ -955,8 +933,8 @@ priv_link_status_update(struct priv *priv) /** * Device status handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param events * Pointer to event flags holder. * @@ -964,8 +942,9 @@ priv_link_status_update(struct priv *priv) * Events bitmap of callback process which can be called immediately. */ static uint32_t -priv_dev_status_handler(struct priv *priv) +mlx5_dev_status_handler(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct ibv_async_event event; uint32_t ret = 0; @@ -975,10 +954,10 @@ priv_dev_status_handler(struct priv *priv) break; if ((event.event_type == IBV_EVENT_PORT_ACTIVE || event.event_type == IBV_EVENT_PORT_ERR) && - (priv->dev->data->dev_conf.intr_conf.lsc == 1)) + (dev->data->dev_conf.intr_conf.lsc == 1)) ret |= (1 << RTE_ETH_EVENT_INTR_LSC); else if (event.event_type == IBV_EVENT_DEVICE_FATAL && - priv->dev->data->dev_conf.intr_conf.rmv == 1) + dev->data->dev_conf.intr_conf.rmv == 1) ret |= (1 << RTE_ETH_EVENT_INTR_RMV); else DEBUG("event type %d on port %d not handled", @@ -986,7 +965,7 @@ priv_dev_status_handler(struct priv *priv) mlx5_glue->ack_async_event(&event); } if (ret & (1 << RTE_ETH_EVENT_INTR_LSC)) - if (priv_link_status_update(priv)) + if (mlx5_link_status_update(dev)) ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC); return ret; } @@ -1005,7 +984,7 @@ mlx5_dev_link_status_handler(void *arg) int ret; priv->pending_alarm = 0; - ret = priv_link_status_update(priv); + ret = mlx5_link_status_update(dev); if (!ret) 
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); } @@ -1022,10 +1001,9 @@ void mlx5_dev_interrupt_handler(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; uint32_t events; - events = priv_dev_status_handler(priv); + events = mlx5_dev_status_handler(dev); if (events & (1 << RTE_ETH_EVENT_INTR_LSC)) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); if (events & (1 << RTE_ETH_EVENT_INTR_RMV)) @@ -1042,22 +1020,21 @@ static void mlx5_dev_handler_socket(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; - priv_socket_handle(priv); + mlx5_socket_handle(dev); } /** * Uninstall interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. */ void -priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + if (dev->data->dev_conf.intr_conf.lsc || dev->data->dev_conf.intr_conf.rmv) rte_intr_callback_unregister(&priv->intr_handle, @@ -1078,14 +1055,13 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) /** * Install interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. 
*/ void -priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int rc, flags; assert(priv->ctx->async_fd > 0); @@ -1103,7 +1079,7 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - rc = priv_socket_init(priv); + rc = mlx5_socket_init(dev); if (!rc && priv->primary_socket) { priv->intr_handle_socket.fd = priv->primary_socket; priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; @@ -1113,23 +1089,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) } /** - * Change the link state (UP / DOWN). - * - * @param priv - * Pointer to private data structure. - * @param up - * Nonzero for link up, otherwise link down. - * - * @return - * 0 on success, errno value on failure. - */ -static int -priv_dev_set_link(struct priv *priv, int up) -{ - return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP); -} - -/** * DPDK callback to bring the link DOWN. * * @param dev * @@ -1141,11 +1100,7 @@ priv_dev_set_link(struct priv *priv, int up) int mlx5_set_link_down(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - int err; - - err = priv_dev_set_link(priv, 0); - return err; + return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP); } /** @@ -1160,27 +1115,22 @@ mlx5_set_link_down(struct rte_eth_dev *dev) int mlx5_set_link_up(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - int err; - - err = priv_dev_set_link(priv, 1); - return err; + return mlx5_set_flags(dev, ~IFF_UP, IFF_UP); } /** * Configure the TX function to use. * - * @param priv - * Pointer to private data structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * Pointer to selected Tx burst function.
*/ eth_tx_burst_t -priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) +mlx5_select_tx_function(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst; struct mlx5_dev_config *config = &priv->config; uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; @@ -1194,8 +1144,8 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) if (vlan_insert || tso) return tx_pkt_burst; if (config->mps == MLX5_MPW_ENHANCED) { - if (priv_check_vec_tx_support(priv, dev) > 0) { - if (priv_check_raw_vec_tx_support(priv, dev) > 0) + if (mlx5_check_vec_tx_support(dev) > 0) { + if (mlx5_check_raw_vec_tx_support(dev) > 0) tx_pkt_burst = mlx5_tx_burst_raw_vec; else tx_pkt_burst = mlx5_tx_burst_vec; @@ -1217,21 +1167,19 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) /** * Configure the RX function to use. * - * @param priv - * Pointer to private data structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * Pointer to selected Rx burst function.
*/ eth_rx_burst_t -priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev) +mlx5_select_rx_function(struct rte_eth_dev *dev) { eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; - assert(priv != NULL); - if (priv_check_vec_rx_support(priv) > 0) { + assert(dev != NULL); + if (mlx5_check_vec_rx_support(dev) > 0) { rx_pkt_burst = mlx5_rx_burst_vec; DEBUG("selected RX vectorized function"); } diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 137c34988..6b53b3ea5 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -89,7 +89,7 @@ static int mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id); static int -mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser); +mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser); /* Hash RX queue types. */ enum hash_rxq_type { @@ -515,8 +515,6 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * Copy the RSS configuration from the user ones, of the rss_conf is null, * uses the driver one. * - * @param priv - * Pointer to private structure. * @param parser * Internal parser structure. * @param rss_conf @@ -526,13 +524,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * 0 on success, errno value on failure. */ static int -priv_flow_convert_rss_conf(struct priv *priv __rte_unused, - struct mlx5_flow_parse *parser, +mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser, const struct rte_eth_rss_conf *rss_conf) { /* * This function is also called at the beginning of - * priv_flow_convert_actions() to initialize the parser with the + * mlx5_flow_convert_actions() to initialize the parser with the * device default RSS configuration. */ if (rss_conf) { @@ -554,23 +551,17 @@ priv_flow_convert_rss_conf(struct priv *priv __rte_unused, /** * Extract attribute to the parser. * - * @param priv - * Pointer to private structure. * @param[in] attr * Flow rule attributes. 
* @param[out] error * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_attributes(struct priv *priv __rte_unused, - const struct rte_flow_attr *attr, - struct rte_flow_error *error, - struct mlx5_flow_parse *parser __rte_unused) +mlx5_flow_convert_attributes(const struct rte_flow_attr *attr, + struct rte_flow_error *error) { if (attr->group) { rte_flow_error_set(error, ENOTSUP, @@ -606,8 +597,8 @@ priv_flow_convert_attributes(struct priv *priv __rte_unused, /** * Extract actions request to the parser. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] error @@ -619,16 +610,18 @@ priv_flow_convert_attributes(struct priv *priv __rte_unused, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_actions(struct priv *priv, +mlx5_flow_convert_actions(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { + struct priv *priv = dev->data->dev_private; + /* * Add default RSS configuration necessary for Verbs to create QP even * if no RSS is necessary. 
*/ - priv_flow_convert_rss_conf(priv, parser, + mlx5_flow_convert_rss_conf(parser, (const struct rte_eth_rss_conf *) &priv->rss_conf); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { @@ -708,8 +701,7 @@ priv_flow_convert_actions(struct priv *priv, for (n = 0; n < rss->num; ++n) parser->queues[n] = rss->queue[n]; parser->queues_n = rss->num; - if (priv_flow_convert_rss_conf(priv, parser, - rss->rss_conf)) { + if (mlx5_flow_convert_rss_conf(parser, rss->rss_conf)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, @@ -763,8 +755,6 @@ priv_flow_convert_actions(struct priv *priv, /** * Validate items. * - * @param priv - * Pointer to private structure. * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[out] error @@ -776,8 +766,7 @@ priv_flow_convert_actions(struct priv *priv, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_items_validate(struct priv *priv __rte_unused, - const struct rte_flow_item items[], +mlx5_flow_convert_items_validate(const struct rte_flow_item items[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { @@ -854,8 +843,6 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused, /** * Allocate memory space to store verbs flow attributes. * - * @param priv - * Pointer to private structure. * @param[in] priority * Flow priority. * @param[in] size @@ -867,8 +854,7 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused, * A verbs flow attribute on success, NULL otherwise. */ static struct ibv_flow_attr * -priv_flow_convert_allocate(struct priv *priv __rte_unused, - unsigned int priority, +mlx5_flow_convert_allocate(unsigned int priority, unsigned int size, struct rte_flow_error *error) { @@ -889,14 +875,11 @@ priv_flow_convert_allocate(struct priv *priv __rte_unused, /** * Finalise verbs flow attributes. * - * @param priv - * Pointer to private structure. 
* @param[in, out] parser * Internal parser structure. */ static void -priv_flow_convert_finalise(struct priv *priv __rte_unused, - struct mlx5_flow_parse *parser) +mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser) { const unsigned int ipv4 = hash_rxq_init[parser->layer].ip_version == MLX5_IPV4; @@ -1014,8 +997,8 @@ priv_flow_convert_finalise(struct priv *priv __rte_unused, /** * Validate and convert a flow supported by the NIC. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[in] attr * Flow rule attributes. * @param[in] pattern @@ -1031,7 +1014,7 @@ priv_flow_convert_finalise(struct priv *priv __rte_unused, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert(struct priv *priv, +mlx5_flow_convert(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], @@ -1048,16 +1031,16 @@ priv_flow_convert(struct priv *priv, .layer = HASH_RXQ_ETH, .mark_id = MLX5_FLOW_MARK_DEFAULT, }; - ret = priv_flow_convert_attributes(priv, attr, error, parser); + ret = mlx5_flow_convert_attributes(attr, error); if (ret) return ret; - ret = priv_flow_convert_actions(priv, actions, error, parser); + ret = mlx5_flow_convert_actions(dev, actions, error, parser); if (ret) return ret; - ret = priv_flow_convert_items_validate(priv, items, error, parser); + ret = mlx5_flow_convert_items_validate(items, error, parser); if (ret) return ret; - priv_flow_convert_finalise(priv, parser); + mlx5_flow_convert_finalise(parser); /* * Second step. * Allocate the memory space to store verbs specifications. 
@@ -1069,8 +1052,7 @@ priv_flow_convert(struct priv *priv, unsigned int offset = parser->queue[HASH_RXQ_ETH].offset; parser->queue[HASH_RXQ_ETH].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); + mlx5_flow_convert_allocate(priority, offset, error); if (!parser->queue[HASH_RXQ_ETH].ibv_attr) return ENOMEM; parser->queue[HASH_RXQ_ETH].offset = @@ -1088,7 +1070,7 @@ priv_flow_convert(struct priv *priv, continue; offset = parser->queue[i].offset; parser->queue[i].ibv_attr = - priv_flow_convert_allocate(priv, priority, + mlx5_flow_convert_allocate(priority, offset, error); if (!parser->queue[i].ibv_attr) goto exit_enomem; @@ -1116,7 +1098,7 @@ priv_flow_convert(struct priv *priv, if (parser->mark) mlx5_flow_create_flag_mark(parser, parser->mark_id); if (parser->count && parser->create) { - mlx5_flow_create_count(priv, parser); + mlx5_flow_create_count(dev, parser); if (!parser->cs) goto exit_count_error; } @@ -1125,7 +1107,7 @@ priv_flow_convert(struct priv *priv, * configuration. */ if (!parser->drop) { - priv_flow_convert_finalise(priv, parser); + mlx5_flow_convert_finalise(parser); } else { parser->queue[HASH_RXQ_ETH].ibv_attr->priority = attr->priority + @@ -1577,8 +1559,8 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) /** * Convert count action to Verbs specification. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Pointer to MLX5 flow parser structure. * @@ -1586,10 +1568,11 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) * 0 on success, errno value on failure. 
*/ static int -mlx5_flow_create_count(struct priv *priv __rte_unused, +mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, struct mlx5_flow_parse *parser __rte_unused) { #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + struct priv *priv = dev->data->dev_private; unsigned int size = sizeof(struct ibv_flow_spec_counter_action); struct ibv_counter_set_init_attr init_attr = {0}; struct ibv_flow_spec_counter_action counter = { @@ -1611,8 +1594,8 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, /** * Complete flow rule creation with a drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1624,11 +1607,12 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, * 0 on success, errno value on failure. */ static int -priv_flow_create_action_queue_drop(struct priv *priv, +mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; struct ibv_flow_spec_action_drop *drop; unsigned int size = sizeof(struct ibv_flow_spec_action_drop); int err = 0; @@ -1683,8 +1667,8 @@ priv_flow_create_action_queue_drop(struct priv *priv, /** * Create hash Rx queues when RSS is enabled. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1696,11 +1680,12 @@ priv_flow_create_action_queue_drop(struct priv *priv, * 0 on success, a errno value otherwise and rte_errno is set. 
*/ static int -priv_flow_create_action_queue_rss(struct priv *priv, +mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != hash_rxq_init_n; ++i) { @@ -1714,21 +1699,21 @@ priv_flow_create_action_queue_rss(struct priv *priv, if (!priv->dev->data->dev_started) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); + mlx5_hrxq_get(dev, + parser->rss_conf.rss_key, + parser->rss_conf.rss_key_len, + hash_fields, + parser->queues, + parser->queues_n); if (flow->frxq[i].hrxq) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); + mlx5_hrxq_new(dev, + parser->rss_conf.rss_key, + parser->rss_conf.rss_key_len, + hash_fields, + parser->queues, + parser->queues_n); if (!flow->frxq[i].hrxq) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, @@ -1742,8 +1727,8 @@ priv_flow_create_action_queue_rss(struct priv *priv, /** * Complete flow rule creation. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1755,18 +1740,19 @@ priv_flow_create_action_queue_rss(struct priv *priv, * 0 on success, a errno value otherwise and rte_errno is set. 
*/ static int -priv_flow_create_action_queue(struct priv *priv, +mlx5_flow_create_action_queue(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; int err = 0; unsigned int i; assert(priv->pd); assert(priv->ctx); assert(!parser->drop); - err = priv_flow_create_action_queue_rss(priv, parser, flow, error); + err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); if (err) goto error; if (parser->count) @@ -1807,7 +1793,7 @@ priv_flow_create_action_queue(struct priv *priv, claim_zero(mlx5_glue->destroy_flow(ibv_flow)); } if (flow->frxq[i].hrxq) - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); + mlx5_hrxq_release(dev, flow->frxq[i].hrxq); if (flow->frxq[i].ibv_attr) rte_free(flow->frxq[i].ibv_attr); } @@ -1822,8 +1808,8 @@ priv_flow_create_action_queue(struct priv *priv, /** * Convert a flow. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] attr @@ -1839,19 +1825,19 @@ priv_flow_create_action_queue(struct priv *priv, * A flow on success, NULL otherwise. 
*/ static struct rte_flow * -priv_flow_create(struct priv *priv, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +mlx5_flow_list_create(struct rte_eth_dev *dev, + struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; unsigned int i; int err; - err = priv_flow_convert(priv, attr, items, actions, error, &parser); + err = mlx5_flow_convert(dev, attr, items, actions, error, &parser); if (err) goto exit; flow = rte_calloc(__func__, 1, @@ -1875,10 +1861,10 @@ priv_flow_create(struct priv *priv, memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len); /* finalise the flow. */ if (parser.drop) - err = priv_flow_create_action_queue_drop(priv, &parser, flow, + err = mlx5_flow_create_action_queue_drop(dev, &parser, flow, error); else - err = priv_flow_create_action_queue(priv, &parser, flow, error); + err = mlx5_flow_create_action_queue(dev, &parser, flow, error); if (err) goto exit; TAILQ_INSERT_TAIL(list, flow, next); @@ -1907,11 +1893,10 @@ mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; int ret; struct mlx5_flow_parse parser = { .create = 0, }; - ret = priv_flow_convert(priv, attr, items, actions, error, &parser); + ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); return ret; } @@ -1929,28 +1914,26 @@ mlx5_flow_create(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; - struct rte_flow *flow; - flow = priv_flow_create(priv, &priv->flows, attr, items, actions, - error); - return flow; + return mlx5_flow_list_create(dev, &priv->flows, attr, items, actions, + 
error); } /** - * Destroy a flow. + * Destroy a flow in a list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] flow * Flow to destroy. */ static void -priv_flow_destroy(struct priv *priv, - struct mlx5_flows *list, - struct rte_flow *flow) +mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { + struct priv *priv = dev->data->dev_private; unsigned int i; if (flow->drop || !flow->mark) @@ -1998,7 +1981,7 @@ priv_flow_destroy(struct priv *priv, claim_zero(mlx5_glue->destroy_flow (frxq->ibv_flow)); if (frxq->hrxq) - mlx5_priv_hrxq_release(priv, frxq->hrxq); + mlx5_hrxq_release(dev, frxq->hrxq); if (frxq->ibv_attr) rte_free(frxq->ibv_attr); } @@ -2015,34 +1998,35 @@ priv_flow_destroy(struct priv *priv, /** * Destroy all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_flush(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) { while (!TAILQ_EMPTY(list)) { struct rte_flow *flow; flow = TAILQ_FIRST(list); - priv_flow_destroy(priv, list, flow); + mlx5_flow_list_destroy(dev, list, flow); } } /** * Create drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * 0 on success. */ int -priv_flow_create_drop_queue(struct priv *priv) +mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq_drop *fdq = NULL; assert(priv->pd); @@ -2123,12 +2107,13 @@ priv_flow_create_drop_queue(struct priv *priv) /** * Delete drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_flow_delete_drop_queue(struct priv *priv) +mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue; if (!fdq) @@ -2148,14 +2133,15 @@ priv_flow_delete_drop_queue(struct priv *priv) /** * Remove all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_stop(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { @@ -2198,7 +2184,7 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) claim_zero(mlx5_glue->destroy_flow (flow->frxq[i].ibv_flow)); flow->frxq[i].ibv_flow = NULL; - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); + mlx5_hrxq_release(dev, flow->frxq[i].hrxq); flow->frxq[i].hrxq = NULL; } DEBUG("Flow %p removed", (void *)flow); @@ -2208,8 +2194,8 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) /** * Add all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @@ -2217,8 +2203,9 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) * 0 on success, a errno value otherwise and rte_errno is set. 
*/ int -priv_flow_start(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; TAILQ_FOREACH(flow, list, next) { @@ -2243,19 +2230,19 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) if (!flow->frxq[i].ibv_attr) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); + mlx5_hrxq_get(dev, flow->rss_conf.rss_key, + flow->rss_conf.rss_key_len, + hash_rxq_init[i].hash_fields, + (*flow->queues), + flow->queues_n); if (flow->frxq[i].hrxq) goto flow_create; flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); + mlx5_hrxq_new(dev, flow->rss_conf.rss_key, + flow->rss_conf.rss_key_len, + hash_rxq_init[i].hash_fields, + (*flow->queues), + flow->queues_n); if (!flow->frxq[i].hrxq) { DEBUG("Flow %p cannot be applied", (void *)flow); @@ -2285,19 +2272,20 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) /** * Verify the flow list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return the number of flows not released. 
*/ int -priv_flow_verify(struct priv *priv) +mlx5_flow_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; int ret = 0; TAILQ_FOREACH(flow, &priv->flows, next) { - DEBUG("%p: flow %p still referenced", (void *)priv, + DEBUG("%p: flow %p still referenced", (void *)dev, (void *)flow); ++ret; } @@ -2378,8 +2366,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, action_rss.local.rss_conf = &priv->rss_conf; action_rss.local.num = priv->reta_idx_n; actions[0].conf = (const void *)&action_rss.rss; - flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions, - &error); + flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, + actions, &error); if (!flow) return rte_errno; return 0; @@ -2419,7 +2407,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_flow_destroy(priv, &priv->flows, flow); + mlx5_flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2435,7 +2423,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_flow_flush(priv, &priv->flows); + mlx5_flow_list_flush(dev, &priv->flows); return 0; } @@ -2452,7 +2440,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, * 0 on success, a errno value otherwise and rte_errno is set. */ static int -priv_flow_query_count(struct ibv_counter_set *cs, +mlx5_flow_query_count(struct ibv_counter_set *cs, struct mlx5_flow_counter_stats *counter_stats, struct rte_flow_query_count *query_count, struct rte_flow_error *error) @@ -2502,7 +2490,7 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, int res = EINVAL; if (flow->cs) { - res = priv_flow_query_count(flow->cs, + res = mlx5_flow_query_count(flow->cs, &flow->counter_stats, (struct rte_flow_query_count *)data, error); @@ -2547,8 +2535,8 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, /** * Convert a flow director filter to a generic flow. * - * @param priv - * Private structure. 
+ * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * @param attributes @@ -2558,10 +2546,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, * 0 on success, errno value on error. */ static int -priv_fdir_filter_convert(struct priv *priv, +mlx5_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { + struct priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; /* Validate queue number. */ @@ -2733,8 +2722,8 @@ priv_fdir_filter_convert(struct priv *priv, /** * Add new flow director filter and store it in list. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * @@ -2742,9 +2731,10 @@ priv_fdir_filter_convert(struct priv *priv, * 0 on success, errno value on failure. */ static int -priv_fdir_filter_add(struct priv *priv, +mlx5_fdir_filter_add(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { + struct priv *priv = dev->data->dev_private; struct mlx5_fdir attributes = { .attr.group = 0, .l2_mask = { @@ -2760,19 +2750,16 @@ priv_fdir_filter_add(struct priv *priv, struct rte_flow *flow; int ret; - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); + ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, + ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) return -ret; - flow = priv_flow_create(priv, - &priv->flows, - &attributes.attr, - attributes.items, - attributes.actions, - &error); + flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, + attributes.items, attributes.actions, + &error); if (flow) { DEBUG("FDIR created %p", (void *)flow); return 0; @@ -2783,8 +2770,8 @@ priv_fdir_filter_add(struct priv *priv, /** * 
Delete specific filter. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Filter to be deleted. * @@ -2792,9 +2779,10 @@ priv_fdir_filter_add(struct priv *priv, * 0 on success, errno value on failure. */ static int -priv_fdir_filter_delete(struct priv *priv, +mlx5_fdir_filter_delete(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { + struct priv *priv = dev->data->dev_private; struct mlx5_fdir attributes = { .attr.group = 0, }; @@ -2807,10 +2795,10 @@ priv_fdir_filter_delete(struct priv *priv, unsigned int i; int ret; - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); + ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, + ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) goto exit; @@ -2868,7 +2856,7 @@ priv_fdir_filter_delete(struct priv *priv, continue; } if (flow) - priv_flow_destroy(priv, &priv->flows, flow); + mlx5_flow_list_destroy(dev, &priv->flows, flow); exit: for (i = 0; i != hash_rxq_init_n; ++i) { if (parser.queue[i].ibv_attr) @@ -2880,8 +2868,8 @@ priv_fdir_filter_delete(struct priv *priv, /** * Update queue for specific filter. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Filter to be updated. * @@ -2889,41 +2877,44 @@ priv_fdir_filter_delete(struct priv *priv, * 0 on success, errno value on failure. */ static int -priv_fdir_filter_update(struct priv *priv, +mlx5_fdir_filter_update(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { int ret; - ret = priv_fdir_filter_delete(priv, fdir_filter); + ret = mlx5_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - ret = priv_fdir_filter_add(priv, fdir_filter); + ret = mlx5_fdir_filter_add(dev, fdir_filter); return ret; } /** * Flush all filters. 
* - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. */ static void -priv_fdir_filter_flush(struct priv *priv) +mlx5_fdir_filter_flush(struct rte_eth_dev *dev) { - priv_flow_flush(priv, &priv->flows); + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->flows); } /** * Get flow director information. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] fdir_info * Resulting flow director information. */ static void -priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) +mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) { + struct priv *priv = dev->data->dev_private; struct rte_eth_fdir_masks *mask = &priv->dev->data->dev_conf.fdir_conf.mask; @@ -2941,8 +2932,8 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) /** * Deal with flow director operations. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param filter_op * Operation to perform. * @param arg @@ -2952,8 +2943,10 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) * 0 on success, errno value on failure. 
*/ static int -priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) +mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, + void *arg) { + struct priv *priv = dev->data->dev_private; enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode; int ret = 0; @@ -2963,27 +2956,27 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) if (fdir_mode != RTE_FDIR_MODE_PERFECT && fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { ERROR("%p: flow director mode %d not supported", - (void *)priv, fdir_mode); + (void *)dev, fdir_mode); return EINVAL; } switch (filter_op) { case RTE_ETH_FILTER_ADD: - ret = priv_fdir_filter_add(priv, arg); + ret = mlx5_fdir_filter_add(dev, arg); break; case RTE_ETH_FILTER_UPDATE: - ret = priv_fdir_filter_update(priv, arg); + ret = mlx5_fdir_filter_update(dev, arg); break; case RTE_ETH_FILTER_DELETE: - ret = priv_fdir_filter_delete(priv, arg); + ret = mlx5_fdir_filter_delete(dev, arg); break; case RTE_ETH_FILTER_FLUSH: - priv_fdir_filter_flush(priv); + mlx5_fdir_filter_flush(dev); break; case RTE_ETH_FILTER_INFO: - priv_fdir_info_get(priv, arg); + mlx5_fdir_info_get(dev, arg); break; default: - DEBUG("%p: unknown operation %u", (void *)priv, + DEBUG("%p: unknown operation %u", (void *)dev, filter_op); ret = EINVAL; break; @@ -3013,7 +3006,6 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, void *arg) { int ret = EINVAL; - struct priv *priv = dev->data->dev_private; switch (filter_type) { case RTE_ETH_FILTER_GENERIC: @@ -3022,7 +3014,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - ret = priv_fdir_ctrl_func(priv, filter_op, arg); + ret = mlx5_fdir_ctrl_func(dev, filter_op, arg); break; default: ERROR("%p: filter type (%d) not supported", diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index a529dfeac..91c977bc5 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ 
b/drivers/net/mlx5/mlx5_mac.c @@ -35,8 +35,8 @@ /** * Get MAC address by querying netdevice. * - * @param[in] priv - * struct priv for the requested device. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] mac * MAC address output buffer. * @@ -44,11 +44,11 @@ * 0 on success, -1 on failure and errno is set. */ int -priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) +mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]) { struct ifreq request; - if (priv_ifreq(priv, SIOCGIFHWADDR, &request)) + if (mlx5_ifreq(dev, SIOCGIFHWADDR, &request)) return -1; memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); return 0; diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 3b7b6d140..fe60dd132 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -83,10 +83,6 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * Register a Memory Region (MR) <-> Memory Pool (MP) association in * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. * - * This function should only be called by txq_mp2mr(). - * - * @param priv - * Pointer to private structure. * @param txq * Pointer to TX queue structure. * @param[in] mp @@ -98,29 +94,35 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * mr on success, NULL on failure. */ struct mlx5_mr * -priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, - struct rte_mempool *mp, unsigned int idx) +mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, + unsigned int idx) { struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + struct rte_eth_dev *dev; struct mlx5_mr *mr; + rte_spinlock_lock(&txq_ctrl->priv->mr_lock); /* Add a new entry, register MR first. 
*/ DEBUG("%p: discovered new memory pool \"%s\" (%p)", (void *)txq_ctrl, mp->name, (void *)mp); - mr = priv_mr_get(priv, mp); + dev = txq_ctrl->priv->dev; + mr = mlx5_mr_get(dev, mp); if (mr == NULL) { if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - DEBUG("Using unregistered mempool 0x%p(%s) in secondary process," - " please create mempool before rte_eth_dev_start()", + DEBUG("Using unregistered mempool 0x%p(%s) in " + "secondary process, please create mempool before " + " rte_eth_dev_start()", (void *)mp, mp->name); + rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return NULL; } - mr = priv_mr_new(priv, mp); + mr = mlx5_mr_new(dev, mp); } if (unlikely(mr == NULL)) { DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", (void *)txq_ctrl); + rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return NULL; } if (unlikely(idx == RTE_DIM(txq->mp2mr))) { @@ -128,7 +130,7 @@ priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, DEBUG("%p: MR <-> MP table full, dropping oldest entry.", (void *)txq_ctrl); --idx; - priv_mr_release(priv, txq->mp2mr[0]); + mlx5_mr_release(txq->mp2mr[0]); memmove(&txq->mp2mr[0], &txq->mp2mr[1], (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); } @@ -137,35 +139,6 @@ priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, (void *)txq_ctrl, mp->name, (void *)mp, txq_ctrl->txq.mp2mr[idx]->lkey); - return mr; -} - -/** - * Register a Memory Region (MR) <-> Memory Pool (MP) association in - * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. - * - * This function should only be called by txq_mp2mr(). - * - * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. - * @param idx - * Index of the next available entry. - * - * @return - * mr on success, NULL on failure. 
- */ -struct mlx5_mr* -mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, - unsigned int idx) -{ - struct mlx5_txq_ctrl *txq_ctrl = - container_of(txq, struct mlx5_txq_ctrl, txq); - struct mlx5_mr *mr; - - rte_spinlock_lock(&txq_ctrl->priv->mr_lock); - mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx); rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return mr; } @@ -225,20 +198,20 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || data.ret == -1) return; - mr = priv_mr_get(priv, mp); + mr = mlx5_mr_get(priv->dev, mp); if (mr) { - priv_mr_release(priv, mr); + mlx5_mr_release(mr); return; } - priv_mr_new(priv, mp); + mlx5_mr_new(priv->dev, mp); } /** * Register a new memory region from the mempool and store it in the memory * region list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param mp * Pointer to the memory pool to register. * @@ -246,8 +219,9 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * The memory region on success. */ struct mlx5_mr * -priv_mr_new(struct priv *priv, struct rte_mempool *mp) +mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) { + struct priv *priv = dev->data->dev_private; const struct rte_memseg *ms = rte_eal_get_physmem_layout(); uintptr_t start; uintptr_t end; @@ -289,7 +263,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) mr->mp = mp; mr->lkey = rte_cpu_to_be_32(mr->mr->lkey); rte_atomic32_inc(&mr->refcnt); - DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv, + DEBUG("%p: new Memory Region %p refcnt: %d", (void *)dev, (void *)mr, rte_atomic32_read(&mr->refcnt)); LIST_INSERT_HEAD(&priv->mr, mr, next); return mr; @@ -298,8 +272,8 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) /** * Search the memory region object in the memory region list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* @param mp * Pointer to the memory pool to register. * @@ -307,8 +281,9 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) * The memory region on success. */ struct mlx5_mr * -priv_mr_get(struct priv *priv, struct rte_mempool *mp) +mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp) { + struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; assert(mp); @@ -335,7 +310,7 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp) * 0 on success, errno on failure. */ int -priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) +mlx5_mr_release(struct mlx5_mr *mr) { assert(mr); DEBUG("Memory Region %p refcnt: %d", @@ -352,20 +327,21 @@ priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) /** * Verify the flow list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -priv_mr_verify(struct priv *priv) +mlx5_mr_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_mr *mr; LIST_FOREACH(mr, &priv->mr, next) { - DEBUG("%p: mr %p still referenced", (void *)priv, + DEBUG("%p: mr %p still referenced", (void *)dev, (void *)mr); ++ret; } diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index 4b88215fb..a654a5a7d 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -96,8 +96,8 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, /** * Allocate/reallocate RETA index table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @praram reta_size * The size of the array to allocate. * @@ -105,8 +105,9 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, * 0 on success, errno value on failure. 
*/ int -priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) +mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) { + struct priv *priv = dev->data->dev_private; void *mem; unsigned int old_size = priv->reta_idx_n; @@ -127,28 +128,29 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) } /** - * Query RETA table. + * DPDK callback to get the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in, out] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries. + * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ -static int -priv_dev_rss_reta_query(struct priv *priv, +int +mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; if (!reta_size || reta_size > priv->reta_idx_n) - return EINVAL; + return -EINVAL; /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -159,31 +161,32 @@ priv_dev_rss_reta_query(struct priv *priv, } /** - * Update RETA table. + * DPDK callback to update the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries. + * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. 
*/ -static int -priv_dev_rss_reta_update(struct priv *priv, +int +mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + int ret; + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; unsigned int pos; - int ret; if (!reta_size) - return EINVAL; - ret = priv_rss_reta_index_resize(priv, reta_size); + return -EINVAL; + ret = mlx5_rss_reta_index_resize(dev, reta_size); if (ret) return ret; for (idx = 0, i = 0; (i != reta_size); ++i) { @@ -194,56 +197,6 @@ priv_dev_rss_reta_update(struct priv *priv, assert(reta_conf[idx].reta[pos] < priv->rxqs_n); (*priv->reta_idx)[i] = reta_conf[idx].reta[pos]; } - return 0; -} - -/** - * DPDK callback to get the RETA indirection table. - * - * @param dev - * Pointer to Ethernet device structure. - * @param reta_conf - * Pointer to RETA configuration structure array. - * @param reta_size - * Size of the RETA table. - * - * @return - * 0 on success, negative errno value on failure. - */ -int -mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) -{ - int ret; - struct priv *priv = dev->data->dev_private; - - ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size); - return -ret; -} - -/** - * DPDK callback to update the RETA indirection table. - * - * @param dev - * Pointer to Ethernet device structure. - * @param reta_conf - * Pointer to RETA configuration structure array. - * @param reta_size - * Size of the RETA table. - * - * @return - * 0 on success, negative errno value on failure. 
- */ -int -mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) -{ - int ret; - struct priv *priv = dev->data->dev_private; - - ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size); if (dev->data->dev_started) { mlx5_dev_stop(dev); mlx5_dev_start(dev); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index de3335cb9..a3b08a1a3 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -97,7 +97,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) (*rxq_ctrl->rxq.elts)[i] = buf; } /* If Rx vector is activated. */ - if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; int j; @@ -156,7 +156,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) * Some mbuf in the Ring belongs to the application. They cannot be * freed. */ - if (rxq_check_vec_support(rxq) > 0) { + if (mlx5_rxq_check_vec_support(rxq) > 0) { for (i = 0; i < used; ++i) (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; rxq->rq_pi = rxq->rq_ci; @@ -181,22 +181,23 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) { DEBUG("cleaning up %p", (void *)rxq_ctrl); if (rxq_ctrl->ibv) - mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); } /** * Returns the per-queue supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Rx offloads. 
*/ uint64_t -mlx5_priv_get_rx_queue_offloads(struct priv *priv) +mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_TIMESTAMP | @@ -217,13 +218,11 @@ mlx5_priv_get_rx_queue_offloads(struct priv *priv) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. * @return * Supported Rx offloads. */ uint64_t -mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) +mlx5_get_rx_port_offloads(void) { uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; @@ -233,8 +232,8 @@ mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) /** * Checks if the per-queue offload configuration is valid. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param offloads * Per-queue offloads configuration. * @@ -242,12 +241,11 @@ mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) * 1 if the configuration is valid, 0 otherwise. 
*/ static int -priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) +mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) { - uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads; - uint64_t queue_supp_offloads = - mlx5_priv_get_rx_queue_offloads(priv); - uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv); + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads; + uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev); + uint64_t port_supp_offloads = mlx5_get_rx_port_offloads(); if ((offloads & (queue_supp_offloads | port_supp_offloads)) != offloads) @@ -299,24 +297,24 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, idx, priv->rxqs_n); return -EOVERFLOW; } - if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) { + if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) { ret = ENOTSUP; ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.rxmode.offloads, - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv))); + (mlx5_get_rx_port_offloads() | + mlx5_get_rx_queue_offloads(dev))); goto out; } - if (!mlx5_priv_rxq_releasable(priv, idx)) { + if (!mlx5_rxq_releasable(dev, idx)) { ret = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, idx); goto out; } - mlx5_priv_rxq_release(priv, idx); - rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp); + mlx5_rxq_release(dev, idx); + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); if (!rxq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); @@ -347,24 +345,25 @@ mlx5_rx_queue_release(void *dpdk_rxq) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) + if (!mlx5_rxq_releasable(priv->dev, 
rxq_ctrl->rxq.stats.idx)) rte_panic("Rx queue %p is still used by a flow and cannot be" " removed\n", (void *)rxq_ctrl); - mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); + mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx); } /** * Allocate queue vector and fill epoll fd list for Rx interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * 0 on success, negative on failure. */ int -priv_rx_intr_vec_enable(struct priv *priv) +mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); @@ -373,7 +372,7 @@ priv_rx_intr_vec_enable(struct priv *priv) if (!priv->dev->data->dev_conf.intr_conf.rxq) return 0; - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); if (intr_handle->intr_vec == NULL) { ERROR("failed to allocate memory for interrupt vector," @@ -383,7 +382,7 @@ priv_rx_intr_vec_enable(struct priv *priv) intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { /* This rxq ibv must not be released in this function. 
*/ - struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i); + struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i); int fd; int flags; int rc; @@ -400,7 +399,7 @@ priv_rx_intr_vec_enable(struct priv *priv) ERROR("too many Rx queues for interrupt vector size" " (%d), Rx interrupts cannot be enabled", RTE_MAX_RXTX_INTR_VEC_ID); - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); return -1; } fd = rxq_ibv->channel->fd; @@ -409,7 +408,7 @@ priv_rx_intr_vec_enable(struct priv *priv) if (rc < 0) { ERROR("failed to make Rx interrupt file descriptor" " %d non-blocking for queue index %d", fd, i); - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); return -1; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; @@ -417,7 +416,7 @@ priv_rx_intr_vec_enable(struct priv *priv) count++; } if (!count) - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); else intr_handle->nb_efd = count; return 0; @@ -426,12 +425,13 @@ priv_rx_intr_vec_enable(struct priv *priv) /** * Clean up Rx interrupts handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_rx_intr_vec_disable(struct priv *priv) +mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_intr_handle *intr_handle = priv->dev->intr_handle; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; @@ -454,7 +454,7 @@ priv_rx_intr_vec_disable(struct priv *priv) */ rxq_data = (*priv->rxqs)[i]; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); } free: rte_intr_free_epoll_fd(intr_handle); @@ -516,13 +516,13 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (rxq_ctrl->irq) { struct mlx5_rxq_ibv *rxq_ibv; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { ret = EINVAL; goto exit; } mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + mlx5_rxq_ibv_release(rxq_ibv); } exit: if (ret) @@ -560,7 +560,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (!rxq_ctrl->irq) goto exit; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { ret = EINVAL; goto exit; @@ -574,7 +574,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); exit: if (rxq_ibv) - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + mlx5_rxq_ibv_release(rxq_ibv); if (ret) WARN("unable to disable interrupt on rx queue %d", rx_queue_id); @@ -584,8 +584,8 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) /** * Create the Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* @param idx * Queue index in DPDK Rx queue array * @@ -593,8 +593,9 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * The Verbs object initialised if it can be created. */ struct mlx5_rxq_ibv * -mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) +mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); @@ -629,9 +630,9 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) } tmpl->rxq_ctrl = rxq_ctrl; /* Use the entire RX mempool as the memory region. */ - tmpl->mr = priv_mr_get(priv, rxq_data->mp); + tmpl->mr = mlx5_mr_get(dev, rxq_data->mp); if (!tmpl->mr) { - tmpl->mr = priv_mr_new(priv, rxq_data->mp); + tmpl->mr = mlx5_mr_new(dev, rxq_data->mp); if (!tmpl->mr) { ERROR("%p: MR creation failure", (void *)rxq_ctrl); goto error; @@ -661,7 +662,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. 
*/ - if (rxq_check_vec_support(rxq_data) < 0) + if (mlx5_rxq_check_vec_support(rxq_data) < 0) attr.cq.ibv.cqe *= 2; } else if (config->cqe_comp && rxq_data->hw_timestamp) { DEBUG("Rx CQE compression is disabled for HW timestamp"); @@ -781,7 +782,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci); DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev, (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; @@ -794,7 +795,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) if (tmpl->channel) claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); if (tmpl->mr) - priv_mr_release(priv, tmpl->mr); + mlx5_mr_release(tmpl->mr); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return NULL; } @@ -802,8 +803,8 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) /** * Get an Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * @@ -811,8 +812,9 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) * The Verbs object if it exists. 
*/ struct mlx5_rxq_ibv * -mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) +mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl; @@ -822,9 +824,9 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) return NULL; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->ibv) { - priv_mr_get(priv, rxq_data->mp); + mlx5_mr_get(dev, rxq_data->mp); rte_atomic32_inc(&rxq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl->ibv, rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); } @@ -834,8 +836,6 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Rx verbs queue object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. * @@ -843,7 +843,7 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) * 0 on success, errno value on failure. */ int -mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) { int ret; @@ -851,10 +851,10 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) assert(rxq_ibv->wq); assert(rxq_ibv->cq); assert(rxq_ibv->mr); - ret = priv_mr_release(priv, rxq_ibv->mr); + ret = mlx5_mr_release(rxq_ibv->mr); if (!ret) rxq_ibv->mr = NULL; - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + DEBUG("Verbs Rx queue %p: refcnt %d", (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) { rxq_free_elts(rxq_ibv->rxq_ctrl); @@ -873,20 +873,21 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) /** * Verify the Verbs Rx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* * @return * The number of object not released. */ int -mlx5_priv_rxq_ibv_verify(struct priv *priv) +mlx5_rxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_rxq_ibv *rxq_ibv; LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) { - DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv, + DEBUG("%p: Verbs Rx queue %p still referenced", (void *)dev, (void *)rxq_ibv); ++ret; } @@ -896,14 +897,11 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv) /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. */ int -mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, - struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv) { assert(rxq_ibv); return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); @@ -912,8 +910,8 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, /** * Create a DPDK Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @param desc @@ -925,11 +923,11 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, * A DPDK queue object on success. 
*/ struct mlx5_rxq_ctrl * -mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) +mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); struct mlx5_dev_config *config = &priv->config; @@ -1029,7 +1027,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; @@ -1041,8 +1039,8 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, /** * Get a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -1050,17 +1048,18 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, * A pointer to the queue if it exists. 
*/ struct mlx5_rxq_ctrl * -mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) +mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; if ((*priv->rxqs)[idx]) { rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_get(priv, idx); + mlx5_rxq_ibv_get(dev, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); } return rxq_ctrl; @@ -1069,8 +1068,8 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) /** * Release a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -1078,8 +1077,9 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) * 0 on success, errno value on failure. */ int -mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) +mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) @@ -1089,11 +1089,11 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) if (rxq_ctrl->ibv) { int ret; - ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + ret = mlx5_rxq_ibv_release(rxq_ctrl->ibv); if (!ret) rxq_ctrl->ibv = NULL; } - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { LIST_REMOVE(rxq_ctrl, next); @@ -1107,8 +1107,8 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. 
* @@ -1116,8 +1116,9 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) * 1 if the queue can be released. */ int -mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) +mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) @@ -1129,20 +1130,21 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_rxq_verify(struct priv *priv) +mlx5_rxq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { - DEBUG("%p: Rx Queue %p still referenced", (void *)priv, + DEBUG("%p: Rx Queue %p still referenced", (void *)dev, (void *)rxq_ctrl); ++ret; } @@ -1152,8 +1154,8 @@ mlx5_priv_rxq_verify(struct priv *priv) /** * Create an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n @@ -1163,9 +1165,10 @@ mlx5_priv_rxq_verify(struct priv *priv) * A new indirection table. */ struct mlx5_ind_table_ibv * -mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], + uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; const unsigned int wq_n = rte_is_power_of_2(queues_n) ? 
log2above(queues_n) : @@ -1179,8 +1182,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], if (!ind_tbl) return NULL; for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq = - mlx5_priv_rxq_get(priv, queues[i]); + struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]); if (!rxq) goto error; @@ -1202,20 +1204,20 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], goto error; rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, + DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); return ind_tbl; error: rte_free(ind_tbl); - DEBUG("%p cannot create indirection table", (void *)priv); + DEBUG("%p cannot create indirection table", (void *)dev); return NULL; } /** * Get an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n @@ -1225,9 +1227,10 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], * An indirection table if found. 
*/ struct mlx5_ind_table_ibv * -mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[], + uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { @@ -1241,10 +1244,10 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], unsigned int i; rte_atomic32_inc(&ind_tbl->refcnt); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, + DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); for (i = 0; i != ind_tbl->queues_n; ++i) - mlx5_priv_rxq_get(priv, ind_tbl->queues[i]); + mlx5_rxq_get(dev, ind_tbl->queues[i]); } return ind_tbl; } @@ -1252,8 +1255,8 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], /** * Release an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param ind_table * Indirection table to release. * @@ -1261,18 +1264,18 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], * 0 on success, errno value on failure. 
*/ int -mlx5_priv_ind_table_ibv_release(struct priv *priv, - struct mlx5_ind_table_ibv *ind_tbl) +mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl) { unsigned int i; - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, + DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) claim_zero(mlx5_glue->destroy_rwq_ind_table (ind_tbl->ind_table)); for (i = 0; i != ind_tbl->queues_n; ++i) - claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i])); + claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i])); if (!rte_atomic32_read(&ind_tbl->refcnt)) { LIST_REMOVE(ind_tbl, next); rte_free(ind_tbl); @@ -1284,21 +1287,22 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv, /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_ind_table_ibv_verify(struct priv *priv) +mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; int ret = 0; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { DEBUG("%p: Verbs indirection table %p still referenced", - (void *)priv, (void *)ind_tbl); + (void *)dev, (void *)ind_tbl); ++ret; } return ret; @@ -1307,8 +1311,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) /** * Create an Rx Hash queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param rss_key * RSS key for the Rx hash queue. * @param rss_key_len @@ -1325,17 +1329,18 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) * An hash Rx queue on success. 
*/ struct mlx5_hrxq * -mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, + uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; queues_n = hash_fields ? queues_n : 1; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) - ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n); if (!ind_tbl) return NULL; qp = mlx5_glue->create_qp_ex @@ -1367,11 +1372,11 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, memcpy(hrxq->rss_key, rss_key, rss_key_len); rte_atomic32_inc(&hrxq->refcnt); LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + mlx5_ind_table_ibv_release(dev, ind_tbl); if (qp) claim_zero(mlx5_glue->destroy_qp(qp)); return NULL; @@ -1380,8 +1385,8 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, /** * Get an Rx Hash queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param rss_conf * RSS configuration for the Rx hash queue. * @param queues @@ -1394,9 +1399,10 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, * An hash Rx queue on success. 
*/ struct mlx5_hrxq * -mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, + uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; queues_n = hash_fields ? queues_n : 1; @@ -1409,15 +1415,15 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, continue; if (hrxq->hash_fields != hash_fields) continue; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) continue; if (ind_tbl != hrxq->ind_table) { - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + mlx5_ind_table_ibv_release(dev, ind_tbl); continue; } rte_atomic32_inc(&hrxq->refcnt); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; } @@ -1427,8 +1433,8 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, /** * Release the hash Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param hrxq * Pointer to Hash Rx queue to release. * @@ -1436,39 +1442,40 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, * 0 on success, errno value on failure. 
*/ int -mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) +mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) { - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); - mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table); + mlx5_ind_table_ibv_release(dev, hrxq->ind_table); LIST_REMOVE(hrxq, next); rte_free(hrxq); return 0; } - claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table)); + claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table)); return EBUSY; } /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_hrxq_ibv_verify(struct priv *priv) +mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; int ret = 0; LIST_FOREACH(hrxq, &priv->hrxqs, next) { DEBUG("%p: Verbs Hash Rx queue %p still referenced", - (void *)priv, (void *)hrxq); + (void *)dev, (void *)hrxq); ++ret; } return ret; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 93d794ede..11dd1b84e 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -1962,27 +1962,25 @@ mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, } int __attribute__((weak)) -priv_check_raw_vec_tx_support(struct priv *priv __rte_unused, - struct rte_eth_dev *dev __rte_unused) +mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_tx_support(struct priv *priv __rte_unused, - struct rte_eth_dev *dev __rte_unused) +mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } int __attribute__((weak)) 
-rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) { return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_rx_support(struct priv *priv __rte_unused) +mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index d0ec9a214..17a6072e2 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -215,67 +215,64 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp); void mlx5_rx_queue_release(void *dpdk_rxq); -int priv_rx_intr_vec_enable(struct priv *priv); -void priv_rx_intr_vec_disable(struct priv *priv); +int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev); +void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev); int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv); -int mlx5_priv_rxq_ibv_releasable(struct priv *priv, - struct mlx5_rxq_ibv *rxq_ibv); -int mlx5_priv_rxq_ibv_verify(struct priv *priv); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, - uint16_t desc, - unsigned int socket, - const struct rte_eth_rxconf *conf, - struct rte_mempool *mp); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_release(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_verify(struct priv *priv); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_rxq_ibv 
*mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_verify(struct rte_eth_dev *dev); int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *priv, - uint16_t queues[], - uint16_t queues_n); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *priv, - uint16_t queues[], - uint16_t queues_n); -int mlx5_priv_ind_table_ibv_release(struct priv *priv, - struct mlx5_ind_table_ibv *ind_tbl); -int mlx5_priv_ind_table_ibv_verify(struct priv *priv); -struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, - uint8_t rss_key_len, uint64_t hash_fields, - uint16_t queues[], uint16_t queues_n); -struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, - uint8_t rss_key_len, uint64_t hash_fields, - uint16_t queues[], uint16_t queues_n); -int mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq); -int mlx5_priv_hrxq_ibv_verify(struct priv *priv); -uint64_t mlx5_priv_get_rx_port_offloads(struct priv *priv); -uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *priv); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, + uint16_t queues[], + uint16_t queues_n); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, + uint16_t queues[], + uint16_t queues_n); +int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl); +int 
mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq); +int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev); +uint64_t mlx5_get_rx_port_offloads(void); +uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev); /* mlx5_txq.c */ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_txconf *conf); void mlx5_tx_queue_release(void *dpdk_txq); -int priv_tx_uar_remap(struct priv *priv, int fd); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv); -int mlx5_priv_txq_ibv_releasable(struct priv *priv, - struct mlx5_txq_ibv *txq_ibv); -int mlx5_priv_txq_ibv_verify(struct priv *priv); -struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *priv, uint16_t idx, - uint16_t desc, unsigned int socket, - const struct rte_eth_txconf *conf); -struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_release(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_verify(struct priv *priv); +int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd); +struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_verify(struct rte_eth_dev 
*dev); +struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_verify(struct rte_eth_dev *dev); void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl); -uint64_t mlx5_priv_get_tx_port_offloads(struct priv *priv); +uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev); /* mlx5_rxtx.c */ @@ -299,26 +296,22 @@ int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset); int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset); /* Vectorized version of mlx5_rxtx.c */ - -int priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); -int priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); -int rxq_check_vec_support(struct mlx5_rxq_data *rxq); -int priv_check_vec_rx_support(struct priv *priv); +int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev); +int mlx5_check_vec_tx_support(struct rte_eth_dev *dev); +int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data); +int mlx5_check_vec_rx_support(struct rte_eth_dev *dev); uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n); uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n); -uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, +uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n); /* mlx5_mr.c */ void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg); -struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, - struct rte_mempool *mp, unsigned int idx); struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, - struct rte_mempool *mp, - unsigned int idx); + struct rte_mempool *mp, unsigned int idx); #ifndef NDEBUG /** 
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c index b66c2916f..257d7b11c 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec.c @@ -223,17 +223,14 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /** * Check Tx queue flags are set for raw vectorized Tx. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, - struct rte_eth_dev *dev) +mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; @@ -246,17 +243,16 @@ priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, /** * Check a device can support vectorized TX. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +mlx5_check_vec_tx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = dev->data->dev_conf.txmode.offloads; if (!priv->config.tx_vec_en || @@ -277,7 +273,7 @@ priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -rxq_check_vec_support(struct mlx5_rxq_data *rxq) +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq) { struct mlx5_rxq_ctrl *ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); @@ -290,15 +286,16 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq) /** * Check a device can support vectorized RX. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_rx_support(struct priv *priv) +mlx5_check_vec_rx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint16_t i; if (!priv->config.rx_vec_en) @@ -309,7 +306,7 @@ priv_check_vec_rx_support(struct priv *priv) if (!rxq) continue; - if (rxq_check_vec_support(rxq) < 0) + if (mlx5_rxq_check_vec_support(rxq) < 0) break; } if (i != priv->rxqs_n) diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 61c1a4a50..b8f610df3 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -18,15 +18,16 @@ /** * Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. */ int -priv_socket_init(struct priv *priv) +mlx5_socket_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; @@ -79,15 +80,17 @@ priv_socket_init(struct priv *priv) /** * Un-Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. */ int -priv_socket_uninit(struct priv *priv) +mlx5_socket_uninit(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; @@ -98,12 +101,13 @@ priv_socket_uninit(struct priv *priv) /** * Handle socket interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_socket_handle(struct priv *priv) +mlx5_socket_handle(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int conn_sock; int ret = 0; struct cmsghdr *cmsg = NULL; @@ -179,15 +183,16 @@ priv_socket_handle(struct priv *priv) /** * Connect to the primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet structure. * * @return * fd on success, negative errno value on failure. */ int -priv_socket_connect(struct priv *priv) +mlx5_socket_connect(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 39be1865a..0febed878 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -122,8 +122,8 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); /** * Read device counters table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Counters table output buffer. * @@ -131,8 +131,9 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); * 0 on success and stats is filled, negative on error. 
*/ static int -priv_read_dev_counters(struct priv *priv, uint64_t *stats) +mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; struct ifreq ifr; @@ -143,7 +144,7 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats) et_stats->cmd = ETHTOOL_GSTATS; et_stats->n_stats = xstats_ctrl->stats_n; ifr.ifr_data = (caddr_t)et_stats; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { WARN("unable to read statistic values from device"); return -1; } @@ -173,20 +174,20 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats) /** * Query the number of statistics provided by ETHTOOL. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Number of statistics on success, -1 on error. */ static int -priv_ethtool_get_stats_n(struct priv *priv) { +mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { struct ethtool_drvinfo drvinfo; struct ifreq ifr; drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { WARN("unable to query number of statistics"); return -1; } @@ -196,12 +197,13 @@ priv_ethtool_get_stats_n(struct priv *priv) { /** * Init the structures to read device counters. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_xstats_init(struct priv *priv) +mlx5_xstats_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; unsigned int j; @@ -210,7 +212,7 @@ priv_xstats_init(struct priv *priv) unsigned int dev_stats_n; unsigned int str_sz; - dev_stats_n = priv_ethtool_get_stats_n(priv); + dev_stats_n = mlx5_ethtool_get_stats_n(dev); if (dev_stats_n < 1) { WARN("no extended statistics available"); return; @@ -229,7 +231,7 @@ priv_xstats_init(struct priv *priv) strings->string_set = ETH_SS_STATS; strings->len = dev_stats_n; ifr.ifr_data = (caddr_t)strings; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { WARN("unable to get statistic names"); goto free; } @@ -258,61 +260,55 @@ priv_xstats_init(struct priv *priv) } /* Copy to base at first time. */ assert(xstats_n <= MLX5_MAX_XSTATS); - priv_read_dev_counters(priv, xstats_ctrl->base); + mlx5_read_dev_counters(dev, xstats_ctrl->base); free: rte_free(strings); } /** - * Get device extended statistics. + * DPDK callback to get extended device statistics. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Pointer to rte extended stats table. + * @param n + * The size of the stats table. * * @return * Number of extended stats on success and stats is filled, * negative on error. 
*/ -static int -priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats) +int +mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct priv *priv = dev->data->dev_private; unsigned int i; - unsigned int n = xstats_n; uint64_t counters[n]; + int ret = 0; - if (priv_read_dev_counters(priv, counters) < 0) - return -1; - for (i = 0; i != xstats_n; ++i) { - stats[i].id = i; - stats[i].value = (counters[i] - xstats_ctrl->base[i]); + if (n >= xstats_n && stats) { + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) + return -1; + if (xstats_ctrl->stats_n != stats_n) + mlx5_xstats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) + return ret; + for (i = 0; i != xstats_n; ++i) { + stats[i].id = i; + stats[i].value = (counters[i] - xstats_ctrl->base[i]); + } } return n; } /** - * Reset device extended statistics. - * - * @param priv - * Pointer to private structure. - */ -static void -priv_xstats_reset(struct priv *priv) -{ - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - unsigned int i; - unsigned int n = xstats_n; - uint64_t counters[n]; - - if (priv_read_dev_counters(priv, counters) < 0) - return; - for (i = 0; i != n; ++i) - xstats_ctrl->base[i] = counters[i]; -} - -/** * DPDK callback to get device statistics. * * @param dev @@ -409,41 +405,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) } /** - * DPDK callback to get extended device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - * @param[out] stats - * Stats table output buffer. - * @param n - * The size of the stats table. - * - * @return - * Number of xstats on success, negative on failure. 
- */ -int -mlx5_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstat *stats, unsigned int n) -{ - struct priv *priv = dev->data->dev_private; - int ret = xstats_n; - - if (n >= xstats_n && stats) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - int stats_n; - - stats_n = priv_ethtool_get_stats_n(priv); - if (stats_n < 0) { - return -1; - } - if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - ret = priv_xstats_get(priv, stats); - } - return ret; -} - -/** * DPDK callback to clear device extended statistics. * * @param dev @@ -455,13 +416,19 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; + unsigned int i; + unsigned int n = xstats_n; + uint64_t counters[n]; - stats_n = priv_ethtool_get_stats_n(priv); + stats_n = mlx5_ethtool_get_stats_n(dev); if (stats_n < 0) return; if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - priv_xstats_reset(priv); + mlx5_xstats_init(dev); + if (mlx5_read_dev_counters(dev, counters) < 0) + return; + for (i = 0; i != n; ++i) + xstats_ctrl->base[i] = counters[i]; } /** diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 3ce93910d..07226b864 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -21,12 +21,13 @@ * Pointer to Ethernet device structure. */ static void -priv_txq_stop(struct priv *priv) +mlx5_txq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->txqs_n; ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); } /** @@ -39,8 +40,9 @@ priv_txq_stop(struct priv *priv) * 0 on success, errno on error. 
*/ static int -priv_txq_start(struct priv *priv) +mlx5_txq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; int ret = 0; @@ -48,28 +50,28 @@ priv_txq_start(struct priv *priv) for (i = 0; i != priv->txqs_n; ++i) { unsigned int idx = 0; struct mlx5_mr *mr; - struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i); + struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); if (!txq_ctrl) continue; LIST_FOREACH(mr, &priv->mr, next) { - priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++); + mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mr->mp, idx++); if (idx == MLX5_PMD_TX_MP_CACHE) break; } txq_alloc_elts(txq_ctrl); - txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i); + txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i); if (!txq_ctrl->ibv) { ret = ENOMEM; goto error; } } - ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd); + ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd); if (ret) goto error; return ret; error: - priv_txq_stop(priv); + mlx5_txq_stop(dev); return ret; } @@ -80,12 +82,13 @@ priv_txq_start(struct priv *priv) * Pointer to Ethernet device structure. */ static void -priv_rxq_stop(struct priv *priv) +mlx5_rxq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); } /** @@ -98,20 +101,21 @@ priv_rxq_stop(struct priv *priv) * 0 on success, errno on error. 
*/ static int -priv_rxq_start(struct priv *priv) +mlx5_rxq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; int ret = 0; for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i); if (!rxq_ctrl) continue; ret = rxq_alloc_elts(rxq_ctrl); if (ret) goto error; - rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i); + rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i); if (!rxq_ctrl->ibv) { ret = ENOMEM; goto error; @@ -119,7 +123,7 @@ priv_rxq_start(struct priv *priv) } return -ret; error: - priv_rxq_stop(priv); + mlx5_rxq_stop(dev); return -ret; } @@ -142,7 +146,7 @@ mlx5_dev_start(struct rte_eth_dev *dev) int err; dev->data->dev_started = 1; - err = priv_flow_create_drop_queue(priv); + err = mlx5_flow_create_drop_queue(dev); if (err) { ERROR("%p: Drop queue allocation failed: %s", (void *)dev, strerror(err)); @@ -150,46 +154,46 @@ mlx5_dev_start(struct rte_eth_dev *dev) } DEBUG("%p: allocating and configuring hash RX queues", (void *)dev); rte_mempool_walk(mlx5_mp2mr_iter, priv); - err = priv_txq_start(priv); + err = mlx5_txq_start(dev); if (err) { ERROR("%p: TXQ allocation failed: %s", (void *)dev, strerror(err)); goto error; } - err = priv_rxq_start(priv); + err = mlx5_rxq_start(dev); if (err) { ERROR("%p: RXQ allocation failed: %s", (void *)dev, strerror(err)); goto error; } - err = priv_rx_intr_vec_enable(priv); + err = mlx5_rx_intr_vec_enable(dev); if (err) { ERROR("%p: RX interrupt vector creation failed", (void *)priv); goto error; } - priv_xstats_init(priv); + mlx5_xstats_init(dev); /* Update link status and Tx/Rx callbacks for the first time. 
*/ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); INFO("Forcing port %u link to be up", dev->data->port_id); - err = priv_force_link_status_change(priv, ETH_LINK_UP); + err = mlx5_force_link_status_change(dev, ETH_LINK_UP); if (err) { DEBUG("Failed to set port %u link to be up", dev->data->port_id); goto error; } - priv_dev_interrupt_handler_install(priv, dev); + mlx5_dev_interrupt_handler_install(dev); return 0; error: /* Rollback. */ dev->data->dev_started = 0; for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); - priv_flow_delete_drop_queue(priv); + mlx5_mr_release(mr); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); + mlx5_flow_delete_drop_queue(dev); return err; } @@ -214,21 +218,21 @@ mlx5_dev_stop(struct rte_eth_dev *dev) rte_wmb(); usleep(1000 * priv->rxqs_n); DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_rx_intr_vec_disable(priv); - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_rx_intr_vec_disable(dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_delete_drop_queue(priv); + mlx5_mr_release(mr); + mlx5_flow_delete_drop_queue(dev); } /** * Enable traffic flows configured by control plane * - * @param priv + * @param dev * Pointer to Ethernet device private data. * @param dev * Pointer to Ethernet device structure. @@ -237,8 +241,9 @@ mlx5_dev_stop(struct rte_eth_dev *dev) * 0 on success. 
*/ int -priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) +mlx5_traffic_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow_item_eth bcast = { .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", }; @@ -356,40 +361,18 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) /** * Disable traffic flows configured by control plane * - * @param priv - * Pointer to Ethernet device private data. * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success. - */ -int -priv_dev_traffic_disable(struct priv *priv, - struct rte_eth_dev *dev __rte_unused) -{ - priv_flow_flush(priv, &priv->ctrl_flows); - return 0; -} - -/** - * Restart traffic flows configured by control plane - * - * @param priv * Pointer to Ethernet device private data. - * @param dev - * Pointer to Ethernet device structure. * * @return * 0 on success. */ int -priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) +mlx5_traffic_disable(struct rte_eth_dev *dev) { - if (dev->data->dev_started) { - priv_dev_traffic_disable(priv, dev); - priv_dev_traffic_enable(priv, dev); - } + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->ctrl_flows); return 0; } @@ -397,7 +380,7 @@ priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) * Restart traffic flows configured by control plane * * @param dev - * Pointer to Ethernet device structure. + * Pointer to Ethernet device private data. * * @return * 0 on success. 
@@ -405,8 +388,9 @@ priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) int mlx5_traffic_restart(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - - priv_dev_traffic_restart(priv, dev); + if (dev->data->dev_started) { + mlx5_traffic_disable(dev); + mlx5_traffic_enable(dev); + } return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 47ee95990..54ed972d7 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -91,15 +91,16 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Tx offloads. */ uint64_t -mlx5_priv_get_tx_port_offloads(struct priv *priv) +mlx5_get_tx_port_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_VLAN_INSERT); struct mlx5_dev_config *config = &priv->config; @@ -123,8 +124,8 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv) /** * Checks if the per-queue offload configuration is valid. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param offloads * Per-queue offloads configuration. * @@ -132,10 +133,10 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv) * 1 if the configuration is valid, 0 otherwise. */ static int -priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) +mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) { - uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads; - uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv); + uint64_t port_offloads = dev->data->dev_conf.txmode.offloads; + uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev); /* There are no Tx offloads which are per queue. 
*/ if ((offloads & port_supp_offloads) != offloads) @@ -177,13 +178,13 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, * use the old API. */ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) { + !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) { ret = ENOTSUP; ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.txmode.offloads, - mlx5_priv_get_tx_port_offloads(priv)); + mlx5_get_tx_port_offloads(dev)); goto out; } if (desc <= MLX5_TX_COMP_THRESH) { @@ -206,14 +207,14 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, idx, priv->txqs_n); return -EOVERFLOW; } - if (!mlx5_priv_txq_releasable(priv, idx)) { + if (!mlx5_txq_releasable(dev, idx)) { ret = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, idx); goto out; } - mlx5_priv_txq_release(priv, idx); - txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf); + mlx5_txq_release(dev, idx); + txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); if (!txq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); @@ -249,7 +250,7 @@ mlx5_tx_queue_release(void *dpdk_txq) if ((*priv->txqs)[i] == txq) { DEBUG("%p: removing TX queue %p from list", (void *)priv->dev, (void *)txq_ctrl); - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(priv->dev, i); break; } } @@ -260,8 +261,8 @@ mlx5_tx_queue_release(void *dpdk_txq) * Both primary and secondary process do mmap to make UAR address * aligned. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param fd * Verbs file descriptor to map UAR pages. * @@ -269,8 +270,9 @@ mlx5_tx_queue_release(void *dpdk_txq) * 0 on success, errno value on failure. 
*/ int -priv_tx_uar_remap(struct priv *priv, int fd) +mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) { + struct priv *priv = dev->data->dev_private; unsigned int i, j; uintptr_t pages[priv->txqs_n]; unsigned int pages_n = 0; @@ -356,8 +358,8 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) /** * Create the Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * @@ -365,8 +367,9 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) * The Verbs object initialised if it can be created. */ struct mlx5_txq_ibv * -mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) +mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq); @@ -383,7 +386,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) struct mlx5dv_cq cq_info; struct mlx5dv_obj obj; const int desc = 1 << txq_data->elts_n; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev); int ret = 0; assert(txq_data); @@ -517,7 +520,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) ERROR("Failed to retrieve UAR info, invalid libmlx5.so version"); goto error; } - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev, (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; @@ -534,8 +537,8 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) /** * Get an Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* @param idx * Queue index in DPDK Rx queue array * @@ -543,8 +546,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) * The Verbs object if it exists. */ struct mlx5_txq_ibv * -mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) +mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq_ctrl; if (idx >= priv->txqs_n) @@ -554,7 +558,7 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); if (txq_ctrl->ibv) { rte_atomic32_inc(&txq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev, (void *)txq_ctrl->ibv, rte_atomic32_read(&txq_ctrl->ibv->refcnt)); } @@ -564,8 +568,6 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Tx verbs queue object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. * @@ -573,11 +575,10 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) * 0 on success, errno on failure. */ int -mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused, - struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) { assert(txq_ibv); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + DEBUG("Verbs Tx queue %p: refcnt %d", (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) { claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp)); @@ -592,14 +593,11 @@ mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused, /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. 
*/ int -mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, - struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv) { assert(txq_ibv); return (rte_atomic32_read(&txq_ibv->refcnt) == 1); @@ -608,20 +606,21 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, /** * Verify the Verbs Tx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_txq_ibv_verify(struct priv *priv) +mlx5_txq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_txq_ibv *txq_ibv; LIST_FOREACH(txq_ibv, &priv->txqsibv, next) { - DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv, + DEBUG("%p: Verbs Tx queue %p still referenced", (void *)dev, (void *)txq_ibv); ++ret; } @@ -645,7 +644,8 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) unsigned int txq_inline; unsigned int txqs_inline; unsigned int inline_max_packet_sz; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = + mlx5_select_tx_function(txq_ctrl->priv->dev); int is_empw_func = is_empw_burst_func(tx_pkt_burst); int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO); @@ -731,8 +731,8 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) /** * Create a DPDK Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @param desc @@ -746,10 +746,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * A DPDK queue object on success. 
*/ struct mlx5_txq_ctrl * -mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_txconf *conf) +mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *tmpl; tmpl = rte_calloc_socket("TXQ", 1, @@ -773,7 +773,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1); tmpl->txq.stats.idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); return tmpl; @@ -782,8 +782,8 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, /** * Get a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -791,8 +791,9 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, * A pointer to the queue if it exists. 
*/ struct mlx5_txq_ctrl * -mlx5_priv_txq_get(struct priv *priv, uint16_t idx) +mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *ctrl = NULL; if ((*priv->txqs)[idx]) { @@ -800,15 +801,15 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) txq); unsigned int i; - mlx5_priv_txq_ibv_get(priv, idx); + mlx5_txq_ibv_get(dev, idx); for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { if (ctrl->txq.mp2mr[i]) claim_nonzero - (priv_mr_get(priv, + (mlx5_mr_get(dev, ctrl->txq.mp2mr[i]->mp)); } rte_atomic32_inc(&ctrl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)ctrl, rte_atomic32_read(&ctrl->refcnt)); } return ctrl; @@ -817,8 +818,8 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) /** * Release a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -826,8 +827,9 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) * 0 on success, errno on failure. 
*/ int -mlx5_priv_txq_release(struct priv *priv, uint16_t idx) +mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; unsigned int i; struct mlx5_txq_ctrl *txq; size_t page_size = sysconf(_SC_PAGESIZE); @@ -835,18 +837,18 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) if (!(*priv->txqs)[idx]) return 0; txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)txq, rte_atomic32_read(&txq->refcnt)); if (txq->ibv) { int ret; - ret = mlx5_priv_txq_ibv_release(priv, txq->ibv); + ret = mlx5_txq_ibv_release(txq->ibv); if (!ret) txq->ibv = NULL; } for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { if (txq->txq.mp2mr[i]) { - priv_mr_release(priv, txq->txq.mp2mr[i]); + mlx5_mr_release(txq->txq.mp2mr[i]); txq->txq.mp2mr[i] = NULL; } } @@ -866,8 +868,8 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -875,8 +877,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) * 1 if the queue can be released. */ int -mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) +mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; if (!(*priv->txqs)[idx]) @@ -888,20 +891,21 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Tx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. 
*/ int -mlx5_priv_txq_verify(struct priv *priv) +mlx5_txq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; int ret = 0; LIST_FOREACH(txq, &priv->txqsctrl, next) { - DEBUG("%p: Tx Queue %p still referenced", (void *)priv, + DEBUG("%p: Tx Queue %p still referenced", (void *)dev, (void *)txq); ++ret; } diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 184ae2f4e..3df962a90 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -79,25 +79,26 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) ++priv->vlan_filter_n; } if (dev->data->dev_started) - priv_dev_traffic_restart(priv, dev); + mlx5_traffic_restart(dev); out: return ret; } /** - * Set/reset VLAN stripping for a specific queue. + * Callback to set/reset VLAN stripping for a specific queue. * - * @param priv - * Pointer to private structure. - * @param idx + * @param dev + * Pointer to Ethernet device structure. + * @param queue * RX queue index. * @param on * Enable/disable VLAN stripping. 
*/ -static void -priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) +void +mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) { - struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); struct ibv_wq_attr mod; @@ -106,8 +107,18 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) 0; int err; + /* Validate hw support */ + if (!priv->config.hw_vlan_strip) { + ERROR("VLAN stripping is not supported"); + return; + } + /* Validate queue number */ + if (queue >= priv->rxqs_n) { + ERROR("VLAN stripping, invalid queue number %d", queue); + return; + } DEBUG("set VLAN offloads 0x%x for port %d queue %d", - vlan_offloads, rxq->port_id, idx); + vlan_offloads, rxq->port_id, queue); if (!rxq_ctrl->ibv) { /* Update related bits in RX queue. */ rxq->vlan_strip = !!on; @@ -121,7 +132,7 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); if (err) { ERROR("%p: failed to modified stripping mode: %s", - (void *)priv, strerror(err)); + (void *)dev, strerror(err)); return; } /* Update related bits in RX queue. */ @@ -129,34 +140,6 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) } /** - * Callback to set/reset VLAN stripping for a specific queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param queue - * RX queue index. - * @param on - * Enable/disable VLAN stripping. 
- */ -void -mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) -{ - struct priv *priv = dev->data->dev_private; - - /* Validate hw support */ - if (!priv->config.hw_vlan_strip) { - ERROR("VLAN stripping is not supported"); - return; - } - /* Validate queue number */ - if (queue >= priv->rxqs_n) { - ERROR("VLAN stripping, invalid queue number %d", queue); - return; - } - priv_vlan_strip_queue_set(priv, queue, on); -} - -/** * Callback to set/reset VLAN offloads for a port. * * @param dev @@ -180,7 +163,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) } /* Run on every RX queue and set/reset VLAN stripping. */ for (i = 0; (i != priv->rxqs_n); i++) - priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); + mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip); } return 0; } -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 09/10] net/mlx5: change non failing function return values 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (7 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 08/10] net/mlx5: prefix all function with mlx5 Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 10/10] net/mlx5: standardize on negative errno values Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh These functions return int although they are not supposed to fail, resulting in unnecessary checks in their callers. Some are returning error where is should be a boolean. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.h | 4 ++-- drivers/net/mlx5/mlx5_mr.c | 4 ++-- drivers/net/mlx5/mlx5_rxq.c | 25 ++++++++++--------------- drivers/net/mlx5/mlx5_socket.c | 6 +----- drivers/net/mlx5/mlx5_trigger.c | 6 +----- drivers/net/mlx5/mlx5_txq.c | 17 ++++++----------- 6 files changed, 22 insertions(+), 40 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 2cb463b62..86310404a 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -242,7 +242,7 @@ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); int mlx5_dev_start(struct rte_eth_dev *dev); void mlx5_dev_stop(struct rte_eth_dev *dev); int mlx5_traffic_enable(struct rte_eth_dev *dev); -int mlx5_traffic_disable(struct rte_eth_dev *dev); +void mlx5_traffic_disable(struct rte_eth_dev *dev); int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ @@ -287,7 +287,7 @@ void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev); /* mlx5_socket.c */ int mlx5_socket_init(struct rte_eth_dev *priv); -int mlx5_socket_uninit(struct rte_eth_dev *priv); 
+void mlx5_socket_uninit(struct rte_eth_dev *priv); void mlx5_socket_handle(struct rte_eth_dev *priv); int mlx5_socket_connect(struct rte_eth_dev *priv); diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index fe60dd132..5c4e68736 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -307,7 +307,7 @@ mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp) * Pointer to memory region to release. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_mr_release(struct mlx5_mr *mr) @@ -321,7 +321,7 @@ mlx5_mr_release(struct mlx5_mr *mr) rte_free(mr); return 0; } - return EBUSY; + return 1; } /** diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index a3b08a1a3..8e7693df2 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -840,7 +840,7 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) * Verbs Rx queue object. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) @@ -867,7 +867,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) rte_free(rxq_ibv); return 0; } - return EBUSY; + return 1; } /** @@ -1074,7 +1074,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) * TX queue index. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) @@ -1086,13 +1086,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) return 0; rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); assert(rxq_ctrl->priv); - if (rxq_ctrl->ibv) { - int ret; - - ret = mlx5_rxq_ibv_release(rxq_ctrl->ibv); - if (!ret) - rxq_ctrl->ibv = NULL; - } + if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv)) + rxq_ctrl->ibv = NULL; DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { @@ -1101,7 +1096,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) (*priv->rxqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** @@ -1261,7 +1256,7 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[], * Indirection table to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, @@ -1281,7 +1276,7 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, rte_free(ind_tbl); return 0; } - return EBUSY; + return 1; } /** @@ -1439,7 +1434,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, * Pointer to Hash Rx queue to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) @@ -1454,7 +1449,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) return 0; } claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table)); - return EBUSY; + return 1; } /** diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index b8f610df3..8db25cff1 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -82,11 +82,8 @@ mlx5_socket_init(struct rte_eth_dev *dev) * * @param[in] dev * Pointer to Ethernet device. - * - * @return - * 0 on success, errno value on failure. */ -int +void mlx5_socket_uninit(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; @@ -95,7 +92,6 @@ mlx5_socket_uninit(struct rte_eth_dev *dev) claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; claim_zero(remove(path)); - return 0; } /** diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 07226b864..a3ccebdd6 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -363,17 +363,13 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device private data. - * - * @return - * 0 on success. */ -int +void mlx5_traffic_disable(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->ctrl_flows); - return 0; } /** diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 54ed972d7..dbf743397 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -572,7 +572,7 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) * Verbs Tx queue object. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) @@ -587,7 +587,7 @@ mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) rte_free(txq_ibv); return 0; } - return EBUSY; + return 1; } /** @@ -824,7 +824,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) * TX queue index. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) @@ -839,13 +839,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)txq, rte_atomic32_read(&txq->refcnt)); - if (txq->ibv) { - int ret; - - ret = mlx5_txq_ibv_release(txq->ibv); - if (!ret) - txq->ibv = NULL; - } + if (txq->ibv && mlx5_txq_ibv_release(txq->ibv)) + txq->ibv = NULL; for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { if (txq->txq.mp2mr[i]) { mlx5_mr_release(txq->txq.mp2mr[i]); @@ -862,7 +857,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) (*priv->txqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v2 10/10] net/mlx5: standardize on negative errno values 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (8 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 09/10] net/mlx5: change non failing function return values Nelio Laranjeiro @ 2018-02-28 15:12 ` Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-02-28 15:12 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Set rte_errno systematically as well. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 88 ++++++----- drivers/net/mlx5/mlx5_ethdev.c | 231 ++++++++++++++++------------- drivers/net/mlx5/mlx5_flow.c | 317 +++++++++++++++++++++++----------------- drivers/net/mlx5/mlx5_mac.c | 33 +++-- drivers/net/mlx5/mlx5_mr.c | 15 +- drivers/net/mlx5/mlx5_rss.c | 50 ++++--- drivers/net/mlx5/mlx5_rxmode.c | 28 +++- drivers/net/mlx5/mlx5_rxq.c | 142 ++++++++++-------- drivers/net/mlx5/mlx5_socket.c | 82 +++++++---- drivers/net/mlx5/mlx5_stats.c | 53 +++++-- drivers/net/mlx5/mlx5_trigger.c | 89 ++++++----- drivers/net/mlx5/mlx5_txq.c | 54 ++++--- drivers/net/mlx5/mlx5_vlan.c | 24 +-- 13 files changed, 719 insertions(+), 487 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 768cb9e5a..bc4691cba 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -108,7 +108,7 @@ mlx5_getenv_int(const char *name) * A pointer to the callback data. * * @return - * a pointer to the allocate space. + * Allocated buffer, NULL otherwise and rte_errno is set. 
*/ static void * mlx5_alloc_verbs_buf(size_t size, void *data) @@ -130,6 +130,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data) } assert(data != NULL); ret = rte_malloc_socket(__func__, size, alignment, socket); + if (!ret && size) + rte_errno = ENOMEM; DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret); return ret; } @@ -365,7 +367,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr) * User data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_args_check(const char *key, const char *val, void *opaque) @@ -376,8 +378,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque) errno = 0; tmp = strtoul(val, NULL, 0); if (errno) { + rte_errno = errno; WARN("%s: \"%s\" is not a valid integer", key, val); - return errno; + return -rte_errno; } if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { config->cqe_comp = !!tmp; @@ -397,7 +400,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->rx_vec_en = !!tmp; } else { WARN("%s: unknown parameter", key); - return -EINVAL; + rte_errno = EINVAL; + return -rte_errno; } return 0; } @@ -411,7 +415,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) * Device arguments structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) @@ -442,9 +446,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) if (rte_kvargs_count(kvlist, params[i])) { ret = rte_kvargs_process(kvlist, params[i], mlx5_args_check, config); - if (ret != 0) { + if (ret) { + rte_errno = EINVAL; rte_kvargs_free(kvlist); - return ret; + return -rte_errno; } } } @@ -470,7 +475,7 @@ static void *uar_base; * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. 
+ * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_uar_init_primary(struct rte_eth_dev *dev) @@ -479,7 +484,6 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev) void *addr = (void *)0; int i; const struct rte_mem_config *mcfg; - int ret; if (uar_base) { /* UAR address space mapped. */ priv->uar_base = uar_base; @@ -501,8 +505,8 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev) if (addr == MAP_FAILED) { ERROR("Failed to reserve UAR address space, please adjust " "MLX5_UAR_SIZE or try --base-virtaddr"); - ret = ENOMEM; - return ret; + rte_errno = ENOMEM; + return -rte_errno; } /* Accept either same addr or a new addr returned from mmap if target * range occupied. @@ -521,14 +525,13 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev) * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_uar_init_secondary(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; void *addr; - int ret; assert(priv->uar_base); if (uar_base) { /* already reserved. */ @@ -541,15 +544,15 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev) if (addr == MAP_FAILED) { ERROR("UAR mmap failed: %p size: %llu", priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + rte_errno = ENXIO; + return -rte_errno; } if (priv->uar_base != addr) { ERROR("UAR address %p size %llu occupied, please adjust " "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr", priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + rte_errno = ENXIO; + return -rte_errno; } uar_base = addr; /* process local, don't reserve again */ INFO("Reserved UAR address space: %p", addr); @@ -568,13 +571,13 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev) * PCI device information. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - struct ibv_device **list; + struct ibv_device **list = NULL; struct ibv_device *ibv_dev; int err = 0; struct ibv_context *attr_ctx = NULL; @@ -594,7 +597,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, idx = mlx5_dev_idx(&pci_dev->addr); if (idx == -1) { ERROR("this driver cannot support any more adapters"); - return -ENOMEM; + err = ENOMEM; + goto error; } DEBUG("using driver device index %d", idx); /* Save PCI address. */ @@ -602,9 +606,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, list = mlx5_glue->get_device_list(&i); if (list == NULL) { assert(errno); + err = errno; if (errno == ENOSYS) ERROR("cannot list devices, is ib_uverbs loaded?"); - return -errno; + goto error; } assert(i >= 0); /* @@ -626,7 +631,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, INFO("PCI information matches, using device \"%s\"", list[i]->name); attr_ctx = mlx5_glue->open_device(list[i]); - err = errno; + rte_errno = errno; + err = rte_errno; break; } if (attr_ctx == NULL) { @@ -634,13 +640,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, switch (err) { case 0: ERROR("cannot access device, is mlx5_ib loaded?"); - return -ENODEV; + err = ENODEV; + goto error; case EINVAL: ERROR("cannot use device, are drivers up to date?"); - return -EINVAL; + goto error; } - assert(err > 0); - return -err; } ibv_dev = list[i]; DEBUG("device opened"); @@ -680,8 +685,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, #else WARN("Tunnel offloading disabled due to old OFED/rdma-core version"); #endif - if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) + if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) { + err = errno; goto error; + } INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt); for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { char name[RTE_ETH_NAME_MAX_LEN]; 
@@ -718,22 +725,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, eth_dev = rte_eth_dev_attach_secondary(name); if (eth_dev == NULL) { ERROR("can not attach rte ethdev"); - err = ENOMEM; + rte_errno = ENOMEM; + err = rte_errno; goto error; } eth_dev->device = &pci_dev->device; eth_dev->dev_ops = &mlx5_dev_sec_ops; err = mlx5_uar_init_secondary(eth_dev); - if (err < 0) { - err = -err; + if (err) goto error; - } /* Receive command fd from primary process */ err = mlx5_socket_connect(eth_dev); - if (err < 0) { - err = -err; + if (err) goto error; - } /* Remap UAR for Tx queues. */ err = mlx5_tx_uar_remap(eth_dev, err); if (err) @@ -804,6 +808,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) { ERROR("ibv_query_device_ex() failed"); + err = errno; goto port_error; } @@ -901,7 +906,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } #endif /* Get actual MTU if possible. */ - mlx5_get_mtu(eth_dev, &priv->mtu); + err = mlx5_get_mtu(eth_dev, &priv->mtu); + if (err) + goto port_error; DEBUG("port %u MTU is %u", priv->port, priv->mtu); /* * Initialize burst functions to prevent crashes before link-up. @@ -945,16 +952,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, */ /* no port found, complain */ if (!mlx5_dev[idx].ports) { - err = ENODEV; - goto error; + rte_errno = ENODEV; + err = rte_errno; } error: if (attr_ctx) claim_zero(mlx5_glue->close_device(attr_ctx)); if (list) mlx5_glue->free_device_list(list); - assert(err >= 0); - return -err; + if (err) { + rte_errno = err; + return -rte_errno; + } + return 0; } static const struct rte_pci_id mlx5_pci_id_map[] = { diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 1fde3d842..d7e85577f 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -101,7 +101,7 @@ struct ethtool_link_settings { * Interface name output buffer. 
* * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) @@ -117,8 +117,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) MKSTR(path, "%s/device/net", priv->ibdev_path); dir = opendir(path); - if (dir == NULL) - return -1; + if (dir == NULL) { + rte_errno = errno; + return -rte_errno; + } } while ((dent = readdir(dir)) != NULL) { char *name = dent->d_name; @@ -168,8 +170,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) snprintf(match, sizeof(match), "%s", name); } closedir(dir); - if (match[0] == '\0') - return -1; + if (match[0] == '\0') { + rte_errno = ENOENT; + return -rte_errno; + } strncpy(*ifname, match, sizeof(*ifname)); return 0; } @@ -185,20 +189,31 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) * Interface request structure output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) { int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); - int ret = -1; + int ret = 0; - if (sock == -1) - return ret; - if (mlx5_get_ifname(dev, &ifr->ifr_name) == 0) - ret = ioctl(sock, req, ifr); + if (sock == -1) { + rte_errno = errno; + return -rte_errno; + } + ret = mlx5_get_ifname(dev, &ifr->ifr_name); + if (ret) + goto error; + ret = ioctl(sock, req, ifr); + if (ret == -1) { + rte_errno = errno; + goto error; + } close(sock); - return ret; + return 0; +error: + close(sock); + return -rte_errno; } /** @@ -210,7 +225,7 @@ mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) * MTU value output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) @@ -233,7 +248,7 @@ mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) * MTU value to set. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) @@ -254,7 +269,7 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * Bitmask for flags to modify. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) @@ -276,7 +291,7 @@ mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) * Pointer to Ethernet device structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_configure(struct rte_eth_dev *dev) @@ -295,31 +310,36 @@ mlx5_dev_configure(struct rte_eth_dev *dev) (mlx5_get_rx_port_offloads() | mlx5_get_rx_queue_offloads(dev)); uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; + int ret = 0; if ((tx_offloads & supp_tx_offloads) != tx_offloads) { ERROR("Some Tx offloads are not supported " "requested 0x%" PRIx64 " supported 0x%" PRIx64, tx_offloads, supp_tx_offloads); - return ENOTSUP; + rte_errno = ENOTSUP; + return -rte_errno; } if ((rx_offloads & supp_rx_offloads) != rx_offloads) { ERROR("Some Rx offloads are not supported " "requested 0x%" PRIx64 " supported 0x%" PRIx64, rx_offloads, supp_rx_offloads); - return ENOTSUP; + rte_errno = ENOTSUP; + return -rte_errno; } if (use_app_rss_key && (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != rss_hash_default_key_len)) { /* MLX5 RSS only support 40bytes key. 
*/ - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_hash_default_key_len, 0); if (!priv->rss_conf.rss_key) { ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n); - return ENOMEM; + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, use_app_rss_key ? @@ -337,7 +357,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev) } if (rxqs_n > priv->config.ind_table_max_size) { ERROR("cannot handle this many RX queues (%u)", rxqs_n); - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } if (rxqs_n == priv->rxqs_n) return 0; @@ -350,8 +371,9 @@ mlx5_dev_configure(struct rte_eth_dev *dev) reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? priv->config.ind_table_max_size : rxqs_n)); - if (mlx5_rss_reta_index_resize(dev, reta_idx_n)) - return ENOMEM; + ret = mlx5_rss_reta_index_resize(dev, reta_idx_n); + if (ret) + return ret; /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread * uniformly. */ @@ -361,7 +383,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev) j = 0; } return 0; - } /** @@ -452,7 +473,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, -1 on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) @@ -464,19 +485,22 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) struct ifreq ifr; struct rte_eth_link dev_link; int link_speed = 0; + int ret; - if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&edata; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", - strerror(errno)); - return -1; + strerror(rte_errno)); + return ret; } link_speed = ethtool_cmd_speed(&edata); if (link_speed == -1) @@ -506,7 +530,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) return 0; } /* Link status is still the same. */ - return -1; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -516,7 +541,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, -1 on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) @@ -526,19 +551,22 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) struct ifreq ifr; struct rte_eth_link dev_link; uint64_t sc; + int ret; - if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&gcmd; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + strerror(rte_errno)); + return ret; } gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords; @@ -549,10 +577,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) *ecmd = gcmd; ifr.ifr_data = (void *)ecmd; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + strerror(rte_errno)); + return ret; } dev_link.link_speed = ecmd->speed; sc = ecmd->link_mode_masks[0] | @@ -602,7 +631,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) return 0; } /* Link status is still the same. 
*/ - return -1; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -615,18 +645,21 @@ static void mlx5_link_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - int err; + int ret; dev->tx_pkt_burst = mlx5_select_tx_function(dev); dev->rx_pkt_burst = mlx5_select_rx_function(dev); - err = mlx5_traffic_enable(dev); - if (err) + ret = mlx5_traffic_enable(dev); + if (ret) { ERROR("%p: error occurred while configuring control flows: %s", - (void *)dev, strerror(err)); - err = mlx5_flow_start(dev, &priv->flows); - if (err) + (void *)dev, strerror(rte_errno)); + return; + } + ret = mlx5_flow_start(dev, &priv->flows); + if (ret) { ERROR("%p: error occurred while configuring flows: %s", - (void *)dev, strerror(err)); + (void *)dev, strerror(rte_errno)); + } } /** @@ -656,7 +689,7 @@ mlx5_link_stop(struct rte_eth_dev *dev) * Link desired status. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) @@ -670,7 +703,8 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) try++; sleep(1); } - return -EAGAIN; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -682,7 +716,7 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) * Wait for request completion (ignored). * * @return - * 0 on success, -1 on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) @@ -699,10 +733,12 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) ret = mlx5_link_update_unlocked_gset(dev); else ret = mlx5_link_update_unlocked_gs(dev); + if (ret) + return ret; /* If lsc interrupt is disabled, should always be ready for traffic. 
*/ if (!dev->data->dev_conf.intr_conf.lsc) { mlx5_link_start(dev); - return ret; + return 0; } /* Re-select burst callbacks only if link status has been changed. */ if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { @@ -711,7 +747,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) else mlx5_link_stop(dev); } - return ret; + return 0; } /** @@ -723,36 +759,32 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) * New MTU. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct priv *priv = dev->data->dev_private; - uint16_t kern_mtu; - int ret = 0; + uint16_t kern_mtu = 0; + int ret; ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; /* Set kernel interface MTU first. */ ret = mlx5_set_mtu(dev, mtu); if (ret) - goto out; + return ret; ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; if (kern_mtu == mtu) { priv->mtu = mtu; DEBUG("adapter port %u MTU set to %u", priv->port, mtu); + return 0; } - return 0; -out: - ret = errno; - WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, - strerror(ret)); - assert(ret >= 0); - return -ret; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -764,7 +796,7 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * Flow control output buffer. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) @@ -776,11 +808,11 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { - ret = errno; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s", - strerror(ret)); - goto out; + strerror(rte_errno)); + return ret; } fc_conf->autoneg = ethpause.autoneg; if (ethpause.rx_pause && ethpause.tx_pause) @@ -791,10 +823,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) fc_conf->mode = RTE_FC_TX_PAUSE; else fc_conf->mode = RTE_FC_NONE; - ret = 0; -out: - assert(ret >= 0); - return -ret; + return 0; } /** @@ -806,7 +835,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Flow control parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) @@ -830,17 +859,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { - ret = errno; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" " failed: %s", - strerror(ret)); - goto out; + strerror(rte_errno)); + return ret; } - ret = 0; -out: - assert(ret >= 0); - return -ret; + return 0; } /** @@ -852,7 +878,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * PCI bus address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, @@ -863,8 +889,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, MKSTR(path, "%s/device/uevent", device->ibdev_path); file = fopen(path, "rb"); - if (file == NULL) - return -1; + if (file == NULL) { + rte_errno = errno; + return -rte_errno; + } while (fgets(line, sizeof(line), file) == line) { size_t len = strlen(line); int ret; @@ -900,15 +928,19 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, * Pointer to Ethernet device. * * @return - * Zero if the callback process can be called immediately. + * Zero if the callback process can be called immediately, negative errno + * value otherwise and rte_errno is set. */ static int mlx5_link_status_update(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct rte_eth_link *link = &dev->data->dev_link; + int ret; - mlx5_link_update(dev, 0); + ret = mlx5_link_update(dev, 0); + if (ret) + return ret; if (((link->link_speed == 0) && link->link_status) || ((link->link_speed != 0) && !link->link_status)) { /* @@ -1062,12 +1094,13 @@ void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - int rc, flags; + int ret; + int flags; assert(priv->ctx->async_fd > 0); flags = fcntl(priv->ctx->async_fd, F_GETFL); - rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); - if (rc < 0) { + ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (ret) { INFO("failed to change file descriptor async event queue"); dev->data->dev_conf.intr_conf.lsc = 0; dev->data->dev_conf.intr_conf.rmv = 0; @@ -1079,8 +1112,10 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - rc = mlx5_socket_init(dev); - if (!rc && priv->primary_socket) { + ret = mlx5_socket_init(dev); + if (ret) + ERROR("cannot initialise socket: %s", strerror(rte_errno)); + else if 
(priv->primary_socket) { priv->intr_handle_socket.fd = priv->primary_socket; priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; rte_intr_callback_register(&priv->intr_handle_socket, @@ -1095,7 +1130,7 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_link_down(struct rte_eth_dev *dev) @@ -1110,7 +1145,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_link_up(struct rte_eth_dev *dev) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 6b53b3ea5..1435516dc 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -247,7 +247,8 @@ struct mlx5_flow_items { * Internal structure to store the conversion. * * @return - * 0 on success, negative value otherwise. + * 0 on success, a negative errno value otherwise and rte_errno is + * set. */ int (*convert)(const struct rte_flow_item *item, const void *default_mask, @@ -460,45 +461,52 @@ struct ibv_spec_header { * Bit-Mask size in bytes. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_item_validate(const struct rte_flow_item *item, const uint8_t *mask, unsigned int size) { - int ret = 0; - - if (!item->spec && (item->mask || item->last)) - return -1; + if (!item->spec && (item->mask || item->last)) { + rte_errno = EINVAL; + return -rte_errno; + } if (item->spec && !item->mask) { unsigned int i; const uint8_t *spec = item->spec; for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + if ((spec[i] | mask[i]) != mask[i]) { + rte_errno = EINVAL; + return -rte_errno; + } } if (item->last && !item->mask) { unsigned int i; const uint8_t *spec = item->last; for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + if ((spec[i] | mask[i]) != mask[i]) { + rte_errno = EINVAL; + return -rte_errno; + } } if (item->mask) { unsigned int i; const uint8_t *spec = item->spec; for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + if ((spec[i] | mask[i]) != mask[i]) { + rte_errno = EINVAL; + return -rte_errno; + } } if (item->spec && item->last) { uint8_t spec[size]; uint8_t last[size]; const uint8_t *apply = mask; unsigned int i; + int ret; if (item->mask) apply = item->mask; @@ -507,8 +515,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, last[i] = ((const uint8_t *)item->last)[i] & apply[i]; } ret = memcmp(spec, last, size); + if (ret != 0) { + rte_errno = EINVAL; + return -rte_errno; + } } - return ret; + return 0; } /** @@ -521,7 +533,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * User RSS configuration to save. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser, @@ -533,10 +545,14 @@ mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser, * device default RSS configuration. 
*/ if (rss_conf) { - if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) - return EINVAL; - if (rss_conf->rss_key_len != 40) - return EINVAL; + if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { + rte_errno = EINVAL; + return -rte_errno; + } + if (rss_conf->rss_key_len != 40) { + rte_errno = EINVAL; + return -rte_errno; + } if (rss_conf->rss_key_len && rss_conf->rss_key) { parser->rss_conf.rss_key_len = rss_conf->rss_key_len; memcpy(parser->rss_key, rss_conf->rss_key, @@ -616,14 +632,17 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser) { struct priv *priv = dev->data->dev_private; + int ret; /* * Add default RSS configuration necessary for Verbs to create QP even * if no RSS is necessary. */ - mlx5_flow_convert_rss_conf(parser, - (const struct rte_eth_rss_conf *) - &priv->rss_conf); + ret = mlx5_flow_convert_rss_conf(parser, + (const struct rte_eth_rss_conf *) + &priv->rss_conf); + if (ret) + return ret; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { continue; @@ -772,6 +791,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], { const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; + int ret = 0; /* Initialise the offsets to start after verbs attribute. 
*/ for (i = 0; i != hash_rxq_init_n; ++i) @@ -779,7 +799,6 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { const struct mlx5_flow_items *token = NULL; unsigned int n; - int err; if (items->type == RTE_FLOW_ITEM_TYPE_VOID) continue; @@ -795,10 +814,10 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], if (!token) goto exit_item_not_supported; cur_item = token; - err = mlx5_flow_item_validate(items, + ret = mlx5_flow_item_validate(items, (const uint8_t *)cur_item->mask, cur_item->mask_sz); - if (err) + if (ret) goto exit_item_not_supported; if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) { if (parser->inner) { @@ -835,9 +854,8 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], } return 0; exit_item_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); - return -rte_errno; + return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM, + items, "item not supported"); } /** @@ -851,7 +869,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], * Perform verbose error reporting if not NULL. * * @return - * A verbs flow attribute on success, NULL otherwise. + * A verbs flow attribute on success, NULL otherwise and rte_errno is set. 
*/ static struct ibv_flow_attr * mlx5_flow_convert_allocate(unsigned int priority, @@ -1054,7 +1072,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, parser->queue[HASH_RXQ_ETH].ibv_attr = mlx5_flow_convert_allocate(priority, offset, error); if (!parser->queue[HASH_RXQ_ETH].ibv_attr) - return ENOMEM; + goto exit_enomem; parser->queue[HASH_RXQ_ETH].offset = sizeof(struct ibv_flow_attr); } else { @@ -1089,7 +1107,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, cur_item->mask), parser); if (ret) { - rte_flow_error_set(error, ret, + rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ITEM, items, "item not supported"); goto exit_free; @@ -1131,13 +1149,13 @@ mlx5_flow_convert(struct rte_eth_dev *dev, parser->queue[i].ibv_attr = NULL; } } - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate verbs spec attributes."); - return ret; + return -rte_errno; exit_count_error: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create counter."); - return rte_errno; + return -rte_errno; } /** @@ -1183,6 +1201,9 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_eth(const struct rte_flow_item *item, @@ -1232,6 +1253,9 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_vlan(const struct rte_flow_item *item, @@ -1272,6 +1296,9 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. 
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_ipv4(const struct rte_flow_item *item, @@ -1324,6 +1351,9 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_ipv6(const struct rte_flow_item *item, @@ -1396,6 +1426,9 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_udp(const struct rte_flow_item *item, @@ -1442,6 +1475,9 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_tcp(const struct rte_flow_item *item, @@ -1488,6 +1524,9 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_vxlan(const struct rte_flow_item *item, @@ -1527,8 +1566,10 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, * before will also match this rule. * To avoid such situation, VNI 0 is currently refused. */ - if (!vxlan.val.tunnel_id) - return EINVAL; + if (!vxlan.val.tunnel_id) { + rte_errno = EINVAL; + return -rte_errno; + } mlx5_flow_create_copy(parser, &vxlan, size); return 0; } @@ -1540,6 +1581,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, * Internal parser structure. 
* @param mark_id * Mark identifier. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) @@ -1565,7 +1609,7 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) * Pointer to MLX5 flow parser structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, @@ -1583,8 +1627,10 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, init_attr.counter_set_id = 0; parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr); - if (!parser->cs) - return EINVAL; + if (!parser->cs) { + rte_errno = EINVAL; + return -rte_errno; + } counter.counter_set_handle = parser->cs->handle; mlx5_flow_create_copy(parser, &counter, size); #endif @@ -1604,7 +1650,7 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, @@ -1615,7 +1661,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; struct ibv_flow_spec_action_drop *drop; unsigned int size = sizeof(struct ibv_flow_spec_action_drop); - int err = 0; assert(priv->pd); assert(priv->ctx); @@ -1641,7 +1686,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; goto error; } return 0; @@ -1661,7 +1705,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, flow->cs = NULL; parser->cs = NULL; } - return err; + return -rte_errno; } /** @@ -1677,7 +1721,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, @@ -1715,10 +1759,10 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, parser->queues, parser->queues_n); if (!flow->frxq[i].hrxq) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "cannot create hash rxq"); - return ENOMEM; + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "cannot create hash rxq"); } } return 0; @@ -1737,7 +1781,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_create_action_queue(struct rte_eth_dev *dev, @@ -1746,14 +1790,14 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; - int err = 0; + int ret; unsigned int i; assert(priv->pd); assert(priv->ctx); assert(!parser->drop); - err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); - if (err) + ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); + if (ret) goto error; if (parser->count) flow->cs = parser->cs; @@ -1769,7 +1813,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; goto error; } DEBUG("%p type %d QP %p ibv_flow %p", @@ -1785,6 +1828,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, } return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ assert(flow); for (i = 0; i != hash_rxq_init_n; ++i) { if (flow->frxq[i].ibv_flow) { @@ -1802,7 +1846,8 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, flow->cs = NULL; parser->cs = NULL; } - return err; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -1822,7 +1867,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * A flow on success, NULL otherwise. + * A flow on success, NULL otherwise and rte_errno is set. 
*/ static struct rte_flow * mlx5_flow_list_create(struct rte_eth_dev *dev, @@ -1835,10 +1880,10 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; unsigned int i; - int err; + int ret; - err = mlx5_flow_convert(dev, attr, items, actions, error, &parser); - if (err) + ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); + if (ret) goto exit; flow = rte_calloc(__func__, 1, sizeof(*flow) + parser.queues_n * sizeof(uint16_t), @@ -1861,11 +1906,11 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len); /* finalise the flow. */ if (parser.drop) - err = mlx5_flow_create_action_queue_drop(dev, &parser, flow, + ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow, error); else - err = mlx5_flow_create_action_queue(dev, &parser, flow, error); - if (err) + ret = mlx5_flow_create_action_queue(dev, &parser, flow, error); + if (ret) goto exit; TAILQ_INSERT_TAIL(list, flow, next); DEBUG("Flow created %p", (void *)flow); @@ -1893,11 +1938,9 @@ mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - int ret; struct mlx5_flow_parse parser = { .create = 0, }; - ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); - return ret; + return mlx5_flow_convert(dev, attr, items, actions, error, &parser); } /** @@ -2021,7 +2064,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) * Pointer to Ethernet device. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) @@ -2034,11 +2077,13 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0); if (!fdq) { WARN("cannot allocate memory for drop queue"); - goto error; + rte_errno = ENOMEM; + return -rte_errno; } fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); if (!fdq->cq) { WARN("cannot allocate CQ for drop queue"); + rte_errno = errno; goto error; } fdq->wq = mlx5_glue->create_wq @@ -2052,6 +2097,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) }); if (!fdq->wq) { WARN("cannot allocate WQ for drop queue"); + rte_errno = errno; goto error; } fdq->ind_table = mlx5_glue->create_rwq_ind_table @@ -2063,6 +2109,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) }); if (!fdq->ind_table) { WARN("cannot allocate indirection table for drop queue"); + rte_errno = errno; goto error; } fdq->qp = mlx5_glue->create_qp_ex @@ -2085,6 +2132,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) }); if (!fdq->qp) { WARN("cannot allocate QP for drop queue"); + rte_errno = errno; goto error; } priv->flow_drop_queue = fdq; @@ -2101,7 +2149,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) if (fdq) rte_free(fdq); priv->flow_drop_queue = NULL; - return -1; + return -rte_errno; } /** @@ -2200,7 +2248,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) * Pointer to a TAILQ flow list. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) @@ -2220,7 +2268,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } DEBUG("Flow %p applied", (void *)flow); /* Next flow. 
*/ @@ -2247,7 +2295,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } flow_create: flow->frxq[i].ibv_flow = @@ -2257,7 +2305,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } DEBUG("Flow %p applied", (void *)flow); } @@ -2307,7 +2355,7 @@ mlx5_flow_verify(struct rte_eth_dev *dev) * A VLAN flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, @@ -2359,8 +2407,10 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, } local; } action_rss; - if (!priv->reta_idx_n) - return EINVAL; + if (!priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } for (i = 0; i != priv->reta_idx_n; ++i) action_rss.local.queue[i] = (*priv->reta_idx)[i]; action_rss.local.rss_conf = &priv->rss_conf; @@ -2369,7 +2419,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, &error); if (!flow) - return rte_errno; + return -rte_errno; return 0; } @@ -2384,7 +2434,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, * An Ethernet flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow(struct rte_eth_dev *dev, @@ -2437,7 +2487,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, * returned data from the counter. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_query_count(struct ibv_counter_set *cs, @@ -2454,15 +2504,13 @@ mlx5_flow_query_count(struct ibv_counter_set *cs, .out = counters, .outlen = 2 * sizeof(uint64_t), }; - int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); + int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); - if (res) { - rte_flow_error_set(error, -res, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot read counter"); - return -res; - } + if (err) + return rte_flow_error_set(error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot read counter"); query_count->hits_set = 1; query_count->bytes_set = 1; query_count->hits = counters[0] - counter_stats->hits; @@ -2487,20 +2535,22 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, void *data, struct rte_flow_error *error) { - int res = EINVAL; - if (flow->cs) { - res = mlx5_flow_query_count(flow->cs, - &flow->counter_stats, - (struct rte_flow_query_count *)data, - error); + int ret; + + ret = mlx5_flow_query_count(flow->cs, + &flow->counter_stats, + (struct rte_flow_query_count *)data, + error); + if (ret) + return ret; } else { - rte_flow_error_set(error, res, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "no counter found for flow"); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "no counter found for flow"); } - return -res; + return 0; } #endif @@ -2543,7 +2593,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, * Generic flow parameters structure. * * @return - * 0 on success, errno value on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_fdir_filter_convert(struct rte_eth_dev *dev, @@ -2556,7 +2606,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, /* Validate queue number. 
*/ if (fdir_filter->action.rx_queue >= priv->rxqs_n) { ERROR("invalid queue number %d", fdir_filter->action.rx_queue); - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } attributes->attr.ingress = 1; attributes->items[0] = (struct rte_flow_item) { @@ -2578,7 +2629,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, break; default: ERROR("invalid behavior %d", fdir_filter->action.behavior); - return ENOTSUP; + rte_errno = ENOTSUP; + return -rte_errno; } attributes->queue.index = fdir_filter->action.rx_queue; switch (fdir_filter->input.flow_type) { @@ -2712,9 +2764,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; default: - ERROR("invalid flow type%d", - fdir_filter->input.flow_type); - return ENOTSUP; + ERROR("invalid flow type%d", fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; } return 0; } @@ -2728,7 +2780,7 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, * Flow director filter to add. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_fdir_filter_add(struct rte_eth_dev *dev, @@ -2752,11 +2804,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) - return -ret; + return ret; ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) - return -ret; + return ret; flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, attributes.items, attributes.actions, &error); @@ -2764,7 +2816,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, DEBUG("FDIR created %p", (void *)flow); return 0; } - return ENOTSUP; + return -rte_errno; } /** @@ -2776,7 +2828,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, * Filter to be deleted. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_fdir_filter_delete(struct rte_eth_dev *dev, @@ -2797,7 +2849,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) - return -ret; + return ret; ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) @@ -2855,6 +2907,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, /* The flow does not match. */ continue; } + ret = rte_errno; /* Save rte_errno before cleanup. */ if (flow) mlx5_flow_list_destroy(dev, &priv->flows, flow); exit: @@ -2862,7 +2915,8 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, if (parser.queue[i].ibv_attr) rte_free(parser.queue[i].ibv_attr); } - return -ret; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -2874,7 +2928,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, * Filter to be updated. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_fdir_filter_update(struct rte_eth_dev *dev, @@ -2885,8 +2939,7 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - ret = mlx5_fdir_filter_add(dev, fdir_filter); - return ret; + return mlx5_fdir_filter_add(dev, fdir_filter); } /** @@ -2940,7 +2993,7 @@ mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) * Pointer to operation-specific structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, @@ -2949,7 +3002,6 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, struct priv *priv = dev->data->dev_private; enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode; - int ret = 0; if (filter_op == RTE_ETH_FILTER_NOP) return 0; @@ -2957,18 +3009,16 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { ERROR("%p: flow director mode %d not supported", (void *)dev, fdir_mode); - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } switch (filter_op) { case RTE_ETH_FILTER_ADD: - ret = mlx5_fdir_filter_add(dev, arg); - break; + return mlx5_fdir_filter_add(dev, arg); case RTE_ETH_FILTER_UPDATE: - ret = mlx5_fdir_filter_update(dev, arg); - break; + return mlx5_fdir_filter_update(dev, arg); case RTE_ETH_FILTER_DELETE: - ret = mlx5_fdir_filter_delete(dev, arg); - break; + return mlx5_fdir_filter_delete(dev, arg); case RTE_ETH_FILTER_FLUSH: mlx5_fdir_filter_flush(dev); break; @@ -2976,12 +3026,11 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, mlx5_fdir_info_get(dev, arg); break; default: - DEBUG("%p: unknown operation %u", (void *)dev, - filter_op); - ret = EINVAL; - break; + DEBUG("%p: unknown operation %u", (void *)dev, filter_op); + rte_errno = EINVAL; + return -rte_errno; } - return ret; + return 0; } /** @@ -2997,7 +3046,7 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, * Pointer to operation-specific structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, @@ -3005,21 +3054,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - int ret = EINVAL; - switch (filter_type) { case RTE_ETH_FILTER_GENERIC: - if (filter_op != RTE_ETH_FILTER_GET) - return -EINVAL; + if (filter_op != RTE_ETH_FILTER_GET) { + rte_errno = EINVAL; + return -rte_errno; + } *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - ret = mlx5_fdir_ctrl_func(dev, filter_op, arg); - break; + return mlx5_fdir_ctrl_func(dev, filter_op, arg); default: ERROR("%p: filter type (%d) not supported", (void *)dev, filter_type); - break; + rte_errno = ENOTSUP; + return -rte_errno; } - return -ret; + return 0; } diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 91c977bc5..ba54c055e 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -41,15 +41,17 @@ * MAC address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]) { struct ifreq request; + int ret; - if (mlx5_ifreq(dev, SIOCGIFHWADDR, &request)) - return -1; + ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request); + if (ret) + return ret; memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); return 0; } @@ -67,8 +69,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) { assert(index < MLX5_MAX_MAC_ADDRESSES); memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr)); - if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); + if (!dev->data->promiscuous) { + int ret = mlx5_traffic_restart(dev); + + if (ret) + ERROR("%p cannot remove mac address: %s", (void *)dev, + strerror(rte_errno)); + } } /** @@ -84,14 +91,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) * VMDq pool index to associate address with (ignored). 
* * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, uint32_t index, uint32_t vmdq __rte_unused) { unsigned int i; - int ret = 0; assert(index < MLX5_MAX_MAC_ADDRESSES); /* First, make sure this address isn't already configured. */ @@ -102,12 +108,13 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac))) continue; /* Address already configured elsewhere, return with error. */ - return EADDRINUSE; + rte_errno = EADDRINUSE; + return -rte_errno; } dev->data->mac_addrs[index] = *mac; if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); - return ret; + return mlx5_traffic_restart(dev); + return 0; } /** @@ -121,6 +128,10 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, void mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { + int ret; + DEBUG("%p: setting primary MAC address", (void *)dev); - mlx5_mac_addr_add(dev, mac_addr, 0, 0); + ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0); + if (ret) + ERROR("cannot set mac address: %s", strerror(rte_errno)); } diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 5c4e68736..884ac33eb 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -91,7 +91,7 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * Index of the next available entry. * * @return - * mr on success, NULL on failure. + * mr on success, NULL on failure and rte_errno is set. 
*/ struct mlx5_mr * mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, @@ -115,6 +115,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, " rte_eth_dev_start()", (void *)mp, mp->name); rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); + rte_errno = ENOTSUP; return NULL; } mr = mlx5_mr_new(dev, mp); @@ -203,7 +204,9 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) mlx5_mr_release(mr); return; } - mlx5_mr_new(priv->dev, mp); + mr = mlx5_mr_new(priv->dev, mp); + if (!mr) + ERROR("cannot create memory region: %s", strerror(rte_errno)); } /** @@ -216,7 +219,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * Pointer to the memory pool to register. * * @return - * The memory region on success. + * The memory region on success, NULL on failure and rte_errno is set. */ struct mlx5_mr * mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) @@ -231,11 +234,13 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id); if (!mr) { DEBUG("unable to configure MR, ibv_reg_mr() failed."); + rte_errno = ENOMEM; return NULL; } if (mlx5_check_mempool(mp, &start, &end) != 0) { ERROR("mempool %p: not virtually contiguous", (void *)mp); + rte_errno = ENOMEM; return NULL; } DEBUG("mempool %p area start=%p end=%p size=%zu", @@ -260,6 +265,10 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) (size_t)(end - start)); mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start, IBV_ACCESS_LOCAL_WRITE); + if (!mr->mr) { + rte_errno = ENOMEM; + return NULL; + } mr->mp = mp; mr->lkey = rte_cpu_to_be_32(mr->mr->lkey); rte_atomic32_inc(&mr->refcnt); diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index a654a5a7d..5ac650163 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -35,33 +35,31 @@ * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. 
+ * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct priv *priv = dev->data->dev_private; - int ret = 0; if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { - ret = -EINVAL; - goto out; + rte_errno = EINVAL; + return -rte_errno; } if (rss_conf->rss_key && rss_conf->rss_key_len) { priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_conf->rss_key_len, 0); if (!priv->rss_conf.rss_key) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, rss_conf->rss_key, rss_conf->rss_key_len); priv->rss_conf.rss_key_len = rss_conf->rss_key_len; } priv->rss_conf.rss_hf = rss_conf->rss_hf; -out: - return ret; + return 0; } /** @@ -73,7 +71,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev, * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, @@ -81,8 +79,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - if (!rss_conf) - return -EINVAL; + if (!rss_conf) { + rte_errno = EINVAL; + return -rte_errno; + } if (rss_conf->rss_key && (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) { memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, @@ -102,7 +102,7 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, * The size of the array to allocate. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) @@ -116,8 +116,10 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) mem = rte_realloc(priv->reta_idx, reta_size * sizeof((*priv->reta_idx)[0]), 0); - if (!mem) - return ENOMEM; + if (!mem) { + rte_errno = ENOMEM; + return -rte_errno; + } priv->reta_idx = mem; priv->reta_idx_n = reta_size; if (old_size < reta_size) @@ -138,7 +140,7 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) * Size of the RETA table. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, @@ -149,8 +151,10 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, unsigned int idx; unsigned int i; - if (!reta_size || reta_size > priv->reta_idx_n) - return -EINVAL; + if (!reta_size || reta_size > priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -171,7 +175,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, * Size of the RETA table. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, @@ -184,8 +188,10 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, unsigned int i; unsigned int pos; - if (!reta_size) - return -EINVAL; + if (!reta_size) { + rte_errno = EINVAL; + return -rte_errno; + } ret = mlx5_rss_reta_index_resize(dev, reta_size); if (ret) return ret; @@ -199,7 +205,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, } if (dev->data->dev_started) { mlx5_dev_stop(dev); - mlx5_dev_start(dev); + return mlx5_dev_start(dev); } - return -ret; + return 0; } diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 4ffc869ad..0c1e9eb2a 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -32,8 +32,13 @@ void mlx5_promiscuous_enable(struct rte_eth_dev *dev) { + int ret; + dev->data->promiscuous = 1; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot enable promiscuous mode: %s", (void *)dev, + strerror(rte_errno)); } /** @@ -45,8 +50,13 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev) void mlx5_promiscuous_disable(struct rte_eth_dev *dev) { + int ret; + dev->data->promiscuous = 0; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot disable promiscuous mode: %s", (void *)dev, + strerror(rte_errno)); } /** @@ -58,8 +68,13 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev) void mlx5_allmulticast_enable(struct rte_eth_dev *dev) { + int ret; + dev->data->all_multicast = 1; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot enable allmulicast mode: %s", (void *)dev, + strerror(rte_errno)); } /** @@ -71,6 +86,11 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev) void mlx5_allmulticast_disable(struct rte_eth_dev *dev) { + int ret; + dev->data->all_multicast = 0; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot disable allmulicast mode: %s", (void *)dev, + 
strerror(rte_errno)); } diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 8e7693df2..477aa2631 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -61,7 +61,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); * Pointer to RX queue structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) @@ -69,7 +69,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; unsigned int i; - int ret = 0; + int err; /* Iterate on segments. */ for (i = 0; (i != elts_n); ++i) { @@ -78,7 +78,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); if (buf == NULL) { ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); - ret = ENOMEM; + rte_errno = ENOMEM; goto error; } /* Headroom is reserved by rte_pktmbuf_alloc(). */ @@ -120,9 +120,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) } DEBUG("%p: allocated and configured %u segments (max %u packets)", (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); - assert(ret == 0); return 0; error: + err = rte_errno; /* Save rte_errno before cleanup. */ elts_n = i; for (i = 0; (i != elts_n); ++i) { if ((*rxq_ctrl->rxq.elts)[i] != NULL) @@ -130,8 +130,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) (*rxq_ctrl->rxq.elts)[i] = NULL; } DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); - assert(ret > 0); - return ret; + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -271,7 +271,7 @@ mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) * Memory pool for buffer allocations. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -282,7 +282,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); - int ret = 0; if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); @@ -295,37 +294,37 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->rxqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->rxqs_n); - return -EOVERFLOW; + rte_errno = EOVERFLOW; + return -rte_errno; } if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) { - ret = ENOTSUP; ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.rxmode.offloads, (mlx5_get_rx_port_offloads() | mlx5_get_rx_queue_offloads(dev))); - goto out; + rte_errno = ENOTSUP; + return -rte_errno; } if (!mlx5_rxq_releasable(dev, idx)) { - ret = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, idx); - goto out; + rte_errno = EBUSY; + return -rte_errno; } mlx5_rxq_release(dev, idx); rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); if (!rxq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); - ret = ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } DEBUG("%p: adding RX queue %p to list", (void *)dev, (void *)rxq_ctrl); (*priv->rxqs)[idx] = &rxq_ctrl->rxq; -out: - return -ret; + return 0; } /** @@ -358,7 +357,7 @@ mlx5_rx_queue_release(void *dpdk_rxq) * Pointer to Ethernet device. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) @@ -377,7 +376,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) if (intr_handle->intr_vec == NULL) { ERROR("failed to allocate memory for interrupt vector," " Rx interrupts will not be supported"); - return -ENOMEM; + rte_errno = ENOMEM; + return -rte_errno; } intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { @@ -400,16 +400,18 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) " (%d), Rx interrupts cannot be enabled", RTE_MAX_RXTX_INTR_VEC_ID); mlx5_rx_intr_vec_disable(dev); - return -1; + rte_errno = ENOMEM; + return -rte_errno; } fd = rxq_ibv->channel->fd; flags = fcntl(fd, F_GETFL); rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); if (rc < 0) { + rte_errno = errno; ERROR("failed to make Rx interrupt file descriptor" " %d non-blocking for queue index %d", fd, i); mlx5_rx_intr_vec_disable(dev); - return -1; + return -rte_errno; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; intr_handle->efds[count] = fd; @@ -497,7 +499,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -505,12 +507,11 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; - int ret = 0; rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->irq) { @@ -518,16 +519,13 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); mlx5_rxq_ibv_release(rxq_ibv); } -exit: - if (ret) - WARN("unable to arm interrupt on rx queue %d", rx_queue_id); - return -ret; + return 0; } /** @@ -539,7 +537,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -550,35 +548,36 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct mlx5_rxq_ibv *rxq_ibv = NULL; struct ibv_cq *ev_cq; void *ev_ctx; - int ret = 0; + int ret; rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (!rxq_ctrl->irq) - goto exit; + return 0; rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx); if (ret || ev_cq != rxq_ibv->cq) { - ret = EINVAL; + rte_errno = EINVAL; goto exit; } rxq_data->cq_arm_sn++; mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); + return 0; exit: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (rxq_ibv) mlx5_rxq_ibv_release(rxq_ibv); - if (ret) - WARN("unable to disable interrupt on rx queue %d", - rx_queue_id); - return -ret; + WARN("unable to disable interrupt on rx queue %d", rx_queue_id); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -590,7 +589,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
*/ struct mlx5_rxq_ibv * mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) @@ -626,6 +625,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (!tmpl) { ERROR("%p: cannot allocate verbs resources", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } tmpl->rxq_ctrl = rxq_ctrl; @@ -643,6 +643,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (!tmpl->channel) { ERROR("%p: Comp Channel creation failure", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } } @@ -672,6 +673,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) &attr.cq.mlx5)); if (tmpl->cq == NULL) { ERROR("%p: CQ creation failure", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } DEBUG("priv->device_attr.max_qp_wr is %d", @@ -708,6 +710,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq); if (tmpl->wq == NULL) { ERROR("%p: WQ creation failure", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } /* @@ -722,6 +725,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) ((1 << rxq_data->elts_n) >> rxq_data->sges_n), (1 << rxq_data->sges_n), attr.wq.max_wr, attr.wq.max_sge); + rte_errno = EINVAL; goto error; } /* Change queue state to ready. */ @@ -733,6 +737,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (ret) { ERROR("%p: WQ state to IBV_WQS_RDY failed", (void *)rxq_ctrl); + rte_errno = ret; goto error; } obj.cq.in = tmpl->cq; @@ -740,11 +745,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) obj.rwq.in = tmpl->wq; obj.rwq.out = &rwq; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ); - if (ret != 0) + if (ret) { + rte_errno = ret; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { ERROR("Wrong MLX5_CQE_SIZE environment variable value: " "it should be set to %u", RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } /* Fill the rings. 
*/ @@ -788,6 +796,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl->wq) claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); if (tmpl->cq) @@ -797,6 +806,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (tmpl->mr) mlx5_mr_release(tmpl->mr); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } @@ -920,7 +930,7 @@ mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv) * NUMA socket on which memory must be allocated. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set. */ struct mlx5_rxq_ctrl * mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -942,8 +952,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } tmpl->socket = socket; if (priv->dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; @@ -973,6 +985,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, 1 << sges_n, dev->data->dev_conf.rxmode.max_rx_pkt_len); + rte_errno = EOVERFLOW; goto error; } } else { @@ -991,6 +1004,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, desc, 1 << tmpl->rxq.sges_n); + rte_errno = EINVAL; goto error; } /* Toggle RX checksum offload if hardware supports it. */ @@ -1045,7 +1059,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, * TX queue index. * * @return - * A pointer to the queue if it exists. + * A pointer to the queue if it exists, NULL otherwise. */ struct mlx5_rxq_ctrl * mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) @@ -1108,7 +1122,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) * TX queue index. 
* * @return - * 1 if the queue can be released. + * 1 if the queue can be released, negative errno otherwise and rte_errno is + * set. */ int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) @@ -1116,8 +1131,10 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; - if (!(*priv->rxqs)[idx]) - return -1; + if (!(*priv->rxqs)[idx]) { + rte_errno = EINVAL; + return -rte_errno; + } rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1); } @@ -1157,7 +1174,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev) * Number of queues in the array. * * @return - * A new indirection table. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ struct mlx5_ind_table_ibv * mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], @@ -1174,8 +1191,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) + queues_n * sizeof(uint16_t), 0); - if (!ind_tbl) + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } for (i = 0; i != queues_n; ++i) { struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]); @@ -1195,8 +1214,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], .ind_tbl = wq, .comp_mask = 0, }); - if (!ind_tbl->ind_table) + if (!ind_tbl->ind_table) { + rte_errno = errno; goto error; + } rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, @@ -1321,7 +1342,7 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) * Number of queues. * * @return - * An hash Rx queue on success. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
*/ struct mlx5_hrxq * mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, @@ -1331,13 +1352,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; + int err; queues_n = hash_fields ? queues_n : 1; ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n); - if (!ind_tbl) + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } qp = mlx5_glue->create_qp_ex (priv->ctx, &(struct ibv_qp_init_attr_ex){ @@ -1355,8 +1379,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, .rwq_ind_tbl = ind_tbl->ind_table, .pd = priv->pd, }); - if (!qp) + if (!qp) { + rte_errno = errno; goto error; + } hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0); if (!hrxq) goto error; @@ -1371,9 +1397,11 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: + err = rte_errno; /* Save rte_errno before cleanup. */ mlx5_ind_table_ibv_release(dev, ind_tbl); if (qp) claim_zero(mlx5_glue->destroy_qp(qp)); + rte_errno = err; /* Restore rte_errno. */ return NULL; } diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 8db25cff1..6e2d971c7 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -22,7 +22,7 @@ * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_socket_init(struct rte_eth_dev *dev) @@ -41,16 +41,21 @@ mlx5_socket_init(struct rte_eth_dev *dev) */ ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { + rte_errno = errno; WARN("secondary process not supported: %s", strerror(errno)); - return ret; + goto error; } priv->primary_socket = ret; flags = fcntl(priv->primary_socket, F_GETFL, 0); - if (flags == -1) - goto out; + if (flags == -1) { + rte_errno = errno; + goto error; + } ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK); - if (ret < 0) - goto out; + if (ret < 0) { + rte_errno = errno; + goto error; + } snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = stat(sun.sun_path, &file_stat); @@ -59,29 +64,30 @@ mlx5_socket_init(struct rte_eth_dev *dev) ret = bind(priv->primary_socket, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { + rte_errno = errno; WARN("cannot bind socket, secondary process not supported: %s", strerror(errno)); goto close; } ret = listen(priv->primary_socket, 0); if (ret < 0) { + rte_errno = errno; WARN("Secondary process not supported: %s", strerror(errno)); goto close; } - return ret; + return 0; close: remove(sun.sun_path); -out: +error: claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; - return -(ret); + return -rte_errno; } /** * Un-Initialise the socket to communicate with the secondary process * * @param[in] dev - * Pointer to Ethernet device. 
*/ void mlx5_socket_uninit(struct rte_eth_dev *dev) @@ -131,19 +137,21 @@ mlx5_socket_handle(struct rte_eth_dev *dev) ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1}, sizeof(int)); if (ret < 0) { - WARN("cannot change socket options"); - goto out; + ret = errno; + WARN("cannot change socket options: %s", strerror(rte_errno)); + goto error; } ret = recvmsg(conn_sock, &msg, MSG_WAITALL); if (ret < 0) { - WARN("received an empty message: %s", strerror(errno)); - goto out; + ret = errno; + WARN("received an empty message: %s", strerror(rte_errno)); + goto error; } /* Expect to receive credentials only. */ cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { WARN("no message"); - goto out; + goto error; } if ((cmsg->cmsg_type == SCM_CREDENTIALS) && (cmsg->cmsg_len >= sizeof(*cred))) { @@ -153,13 +161,13 @@ mlx5_socket_handle(struct rte_eth_dev *dev) cmsg = CMSG_NXTHDR(&msg, cmsg); if (cmsg != NULL) { WARN("Message wrongly formatted"); - goto out; + goto error; } /* Make sure all the ancillary data was received and valid. */ if ((cred == NULL) || (cred->uid != getuid()) || (cred->gid != getgid())) { WARN("wrong credentials"); - goto out; + goto error; } /* Set-up the ancillary data. */ cmsg = CMSG_FIRSTHDR(&msg); @@ -172,7 +180,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev) ret = sendmsg(conn_sock, &msg, 0); if (ret < 0) WARN("cannot send response"); -out: +error: close(conn_sock); } @@ -183,7 +191,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev) * Pointer to Ethernet structure. * * @return - * fd on success, negative errno value on failure. + * fd on success, negative errno value otherwise and rte_errno is set. 
*/ int mlx5_socket_connect(struct rte_eth_dev *dev) @@ -192,7 +200,7 @@ mlx5_socket_connect(struct rte_eth_dev *dev) struct sockaddr_un sun = { .sun_family = AF_UNIX, }; - int socket_fd; + int socket_fd = -1; int *fd = NULL; int ret; struct ucred *cred; @@ -212,57 +220,67 @@ mlx5_socket_connect(struct rte_eth_dev *dev) ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { + rte_errno = errno; WARN("cannot connect to primary"); - return ret; + goto error; } socket_fd = ret; snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { + rte_errno = errno; WARN("cannot connect to primary"); - goto out; + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { + rte_errno = EINVAL; DEBUG("cannot get first message"); - goto out; + goto error; } cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDENTIALS; cmsg->cmsg_len = CMSG_LEN(sizeof(*cred)); cred = (struct ucred *)CMSG_DATA(cmsg); if (cred == NULL) { + rte_errno = EINVAL; DEBUG("no credentials received"); - goto out; + goto error; } cred->pid = getpid(); cred->uid = getuid(); cred->gid = getgid(); ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT); if (ret < 0) { + rte_errno = errno; WARN("cannot send credentials to primary: %s", strerror(errno)); - goto out; + goto error; } ret = recvmsg(socket_fd, &msg, MSG_WAITALL); if (ret <= 0) { + rte_errno = errno; WARN("no message from primary: %s", strerror(errno)); - goto out; + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { + rte_errno = EINVAL; WARN("No file descriptor received"); - goto out; + goto error; } fd = (int *)CMSG_DATA(cmsg); - if (*fd <= 0) { + if (*fd < 0) { WARN("no file descriptor received: %s", strerror(errno)); - ret = *fd; - goto out; + rte_errno = *fd; + goto error; } ret = *fd; -out: close(socket_fd); - return ret; + return 0; +error: + if (socket_fd != -1) + close(socket_fd); + return -rte_errno; } diff 
--git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 0febed878..06e9a1f19 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -128,7 +128,8 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); * Counters table output buffer. * * @return - * 0 on success and stats is filled, negative on error. + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ static int mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) @@ -140,13 +141,15 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; et_stats->cmd = ETHTOOL_GSTATS; et_stats->n_stats = xstats_ctrl->stats_n; ifr.ifr_data = (caddr_t)et_stats; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("unable to read statistic values from device"); - return -1; + return ret; } for (i = 0; i != xstats_n; ++i) { if (mlx5_counters_init[i].ib) { @@ -178,18 +181,21 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) * Pointer to Ethernet device. * * @return - * Number of statistics on success, -1 on error. + * Number of statistics on success, negative errno value otherwise and + * rte_errno is set. 
*/ static int mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { struct ethtool_drvinfo drvinfo; struct ifreq ifr; + int ret; drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("unable to query number of statistics"); - return -1; + return ret; } return drvinfo.n_stats; } @@ -211,12 +217,14 @@ mlx5_xstats_init(struct rte_eth_dev *dev) struct ethtool_gstrings *strings = NULL; unsigned int dev_stats_n; unsigned int str_sz; + int ret; - dev_stats_n = mlx5_ethtool_get_stats_n(dev); - if (dev_stats_n < 1) { + ret = mlx5_ethtool_get_stats_n(dev); + if (ret < 0) { WARN("no extended statistics available"); return; } + dev_stats_n = ret; xstats_ctrl->stats_n = dev_stats_n; /* Allocate memory to grab stat names and values. */ str_sz = dev_stats_n * ETH_GSTRING_LEN; @@ -231,7 +239,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev) strings->string_set = ETH_SS_STATS; strings->len = dev_stats_n; ifr.ifr_data = (caddr_t)strings; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("unable to get statistic names"); goto free; } @@ -260,7 +269,9 @@ mlx5_xstats_init(struct rte_eth_dev *dev) } /* Copy to base at first time. */ assert(xstats_n <= MLX5_MAX_XSTATS); - mlx5_read_dev_counters(dev, xstats_ctrl->base); + ret = mlx5_read_dev_counters(dev, xstats_ctrl->base); + if (ret) + ERROR("cannot read device counters: %s", strerror(rte_errno)); free: rte_free(strings); } @@ -277,7 +288,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev) * * @return * Number of extended stats on success and stats is filled, - * negative on error. + * negative on error and rte_errno is set. 
*/ int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, @@ -286,15 +297,15 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, struct priv *priv = dev->data->dev_private; unsigned int i; uint64_t counters[n]; - int ret = 0; if (n >= xstats_n && stats) { struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; + int ret; stats_n = mlx5_ethtool_get_stats_n(dev); if (stats_n < 0) - return -1; + return stats_n; if (xstats_ctrl->stats_n != stats_n) mlx5_xstats_init(dev); ret = mlx5_read_dev_counters(dev, counters); @@ -315,6 +326,10 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, * Pointer to Ethernet device structure. * @param[out] stats * Stats structure output buffer. + * + * @return + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) @@ -419,14 +434,22 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) unsigned int i; unsigned int n = xstats_n; uint64_t counters[n]; + int ret; stats_n = mlx5_ethtool_get_stats_n(dev); - if (stats_n < 0) + if (stats_n < 0) { + ERROR("%p cannot get stats: %s", (void *)dev, + strerror(-stats_n)); return; + } if (xstats_ctrl->stats_n != stats_n) mlx5_xstats_init(dev); - if (mlx5_read_dev_counters(dev, counters) < 0) + ret = mlx5_read_dev_counters(dev, counters); + if (ret) { + ERROR("%p cannot read device counters: %s", (void *)dev, + strerror(rte_errno)); return; + } for (i = 0; i != n; ++i) xstats_ctrl->base[i] = counters[i]; } diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index a3ccebdd6..4e396b7f0 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -37,14 +37,14 @@ mlx5_txq_stop(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_txq_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; + int ret; /* Add memory regions to Tx queues. */ for (i = 0; i != priv->txqs_n; ++i) { @@ -62,17 +62,19 @@ mlx5_txq_start(struct rte_eth_dev *dev) txq_alloc_elts(txq_ctrl); txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i); if (!txq_ctrl->ibv) { - ret = ENOMEM; + rte_errno = ENOMEM; goto error; } } ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd); if (ret) goto error; - return ret; + return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ mlx5_txq_stop(dev); - return ret; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -98,7 +100,7 @@ mlx5_rxq_stop(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_rxq_start(struct rte_eth_dev *dev) @@ -116,15 +118,15 @@ mlx5_rxq_start(struct rte_eth_dev *dev) if (ret) goto error; rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i); - if (!rxq_ctrl->ibv) { - ret = ENOMEM; + if (!rxq_ctrl->ibv) goto error; - } } - return -ret; + return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ mlx5_rxq_stop(dev); - return -ret; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -136,48 +138,48 @@ mlx5_rxq_start(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr = NULL; - int err; + int ret; dev->data->dev_started = 1; - err = mlx5_flow_create_drop_queue(dev); - if (err) { + ret = mlx5_flow_create_drop_queue(dev); + if (ret) { ERROR("%p: Drop queue allocation failed: %s", - (void *)dev, strerror(err)); + (void *)dev, strerror(rte_errno)); goto error; } DEBUG("%p: allocating and configuring hash RX queues", (void *)dev); rte_mempool_walk(mlx5_mp2mr_iter, priv); - err = mlx5_txq_start(dev); - if (err) { - ERROR("%p: TXQ allocation failed: %s", - (void *)dev, strerror(err)); + ret = mlx5_txq_start(dev); + if (ret) { + ERROR("%p: Tx Queue allocation failed: %s", + (void *)dev, strerror(rte_errno)); goto error; } - err = mlx5_rxq_start(dev); - if (err) { - ERROR("%p: RXQ allocation failed: %s", - (void *)dev, strerror(err)); + ret = mlx5_rxq_start(dev); + if (ret) { + ERROR("%p: Rx Queue allocation failed: %s", + (void *)dev, strerror(rte_errno)); goto error; } - err = mlx5_rx_intr_vec_enable(dev); - if (err) { - ERROR("%p: RX interrupt vector creation failed", - (void *)priv); + ret = mlx5_rx_intr_vec_enable(dev); + if (ret) { + ERROR("%p: Rx interrupt vector creation failed", + (void *)dev); goto error; } mlx5_xstats_init(dev); /* Update link status and Tx/Rx callbacks for the first time. */ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); INFO("Forcing port %u link to be up", dev->data->port_id); - err = mlx5_force_link_status_change(dev, ETH_LINK_UP); - if (err) { + ret = mlx5_force_link_status_change(dev, ETH_LINK_UP); + if (ret) { DEBUG("Failed to set port %u link to be up", dev->data->port_id); goto error; @@ -185,6 +187,7 @@ mlx5_dev_start(struct rte_eth_dev *dev) mlx5_dev_interrupt_handler_install(dev); return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ /* Rollback. 
*/ dev->data->dev_started = 0; for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) @@ -194,7 +197,8 @@ mlx5_dev_start(struct rte_eth_dev *dev) mlx5_txq_stop(dev); mlx5_rxq_stop(dev); mlx5_flow_delete_drop_queue(dev); - return err; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -238,7 +242,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_traffic_enable(struct rte_eth_dev *dev) @@ -276,8 +280,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc)); - return 0; + ret = mlx5_ctrl_flow(dev, &promisc, &promisc); + if (ret) + goto error; } if (dev->data->all_multicast) { struct rte_flow_item_eth multicast = { @@ -286,7 +291,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast)); + ret = mlx5_ctrl_flow(dev, &multicast, &multicast); + if (ret) + goto error; } else { /* Add broadcast/multicast flows. */ for (i = 0; i != vlan_filter_n; ++i) { @@ -346,15 +353,17 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) goto error; } if (!vlan_filter_n) { - ret = mlx5_ctrl_flow(dev, &unicast, - &unicast_mask); + ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask); if (ret) goto error; } } return 0; error: - return rte_errno; + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_list_flush(dev, &priv->ctrl_flows); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } @@ -379,14 +388,14 @@ mlx5_traffic_disable(struct rte_eth_dev *dev) * Pointer to Ethernet device private data. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_traffic_restart(struct rte_eth_dev *dev) { if (dev->data->dev_started) { mlx5_traffic_disable(dev); - mlx5_traffic_enable(dev); + return mlx5_traffic_enable(dev); } return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index dbf743397..4719fdc21 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -161,7 +161,7 @@ mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) * Thresholds parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -171,7 +171,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_txq_data *txq = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); - int ret = 0; /* * Don't verify port offloads for application which @@ -179,13 +178,13 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, */ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) { - ret = ENOTSUP; + rte_errno = ENOTSUP; ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.txmode.offloads, mlx5_get_tx_port_offloads(dev)); - goto out; + return -rte_errno; } if (desc <= MLX5_TX_COMP_THRESH) { WARN("%p: number of descriptors requested for TX queue %u" @@ -205,27 +204,26 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->txqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->txqs_n); - return -EOVERFLOW; + rte_errno = EOVERFLOW; + return -rte_errno; } if (!mlx5_txq_releasable(dev, idx)) { - ret = EBUSY; + rte_errno = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, 
idx); - goto out; + return -rte_errno; } mlx5_txq_release(dev, idx); txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); if (!txq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); - ret = ENOMEM; - goto out; + return -rte_errno; } DEBUG("%p: adding TX queue %p to list", (void *)dev, (void *)txq_ctrl); (*priv->txqs)[idx] = &txq_ctrl->txq; -out: - return -ret; + return 0; } /** @@ -248,9 +246,9 @@ mlx5_tx_queue_release(void *dpdk_txq) priv = txq_ctrl->priv; for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { + mlx5_txq_release(priv->dev, i); DEBUG("%p: removing TX queue %p from list", (void *)priv->dev, (void *)txq_ctrl); - mlx5_txq_release(priv->dev, i); break; } } @@ -267,7 +265,7 @@ mlx5_tx_queue_release(void *dpdk_txq) * Verbs file descriptor to map UAR pages. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) @@ -284,7 +282,6 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) struct mlx5_txq_ctrl *txq_ctrl; int already_mapped; size_t page_size = sysconf(_SC_PAGESIZE); - int r; memset(pages, 0, priv->txqs_n * sizeof(uintptr_t)); /* @@ -323,8 +320,8 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) /* fixed mmap have to return same address */ ERROR("call to mmap failed on UAR for txq %d\n", i); - r = ENXIO; - return r; + rte_errno = ENXIO; + return -rte_errno; } } if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */ @@ -364,7 +361,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
*/ struct mlx5_txq_ibv * mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) @@ -394,7 +391,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) priv->verbs_alloc_ctx.obj = txq_ctrl; if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set"); - goto error; + rte_errno = EINVAL; + return NULL; } memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv)); /* MRs will be registered in mp2mr[] later. */ @@ -408,6 +406,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0); if (tmpl.cq == NULL) { ERROR("%p: CQ creation failure", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.init = (struct ibv_qp_init_attr_ex){ @@ -449,6 +448,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init); if (tmpl.qp == NULL) { ERROR("%p: QP creation failure", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ @@ -461,6 +461,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) (IBV_QP_STATE | IBV_QP_PORT)); if (ret) { ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ @@ -469,18 +470,21 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.mod.qp_state = IBV_QPS_RTS; ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl); + rte_errno = errno; goto error; } txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0, txq_ctrl->socket); if (!txq_ibv) { ERROR("%p: cannot allocate memory", (void *)txq_ctrl); + rte_errno = ENOMEM; goto error; } obj.cq.in = tmpl.cq; @@ -488,11 +492,14 @@ mlx5_txq_ibv_new(struct 
rte_eth_dev *dev, uint16_t idx) obj.qp.in = tmpl.qp; obj.qp.out = &qp; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP); - if (ret != 0) + if (ret != 0) { + rte_errno = errno; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { ERROR("Wrong MLX5_CQE_SIZE environment variable value: " "it should be set to %u", RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } txq_data->cqe_n = log2above(cq_info.cqe_cnt); @@ -518,6 +525,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset; } else { ERROR("Failed to retrieve UAR info, invalid libmlx5.so version"); + rte_errno = EINVAL; goto error; } DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev, @@ -526,11 +534,13 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return txq_ibv; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl.cq) claim_zero(mlx5_glue->destroy_cq(tmpl.cq)); if (tmpl.qp) claim_zero(mlx5_glue->destroy_qp(tmpl.qp)); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } @@ -743,7 +753,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * Thresholds parameters. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set. 
*/ struct mlx5_txq_ctrl * mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -756,8 +766,10 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, sizeof(*tmpl) + desc * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } assert(desc > MLX5_TX_COMP_THRESH); tmpl->txq.offloads = conf->offloads; tmpl->priv = priv; diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 3df962a90..3246c0a38 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -37,14 +37,13 @@ * Toggle filter. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; DEBUG("%p: %s VLAN filter ID %" PRIu16, (void *)dev, (on ? "enable" : "disable"), vlan_id); @@ -54,8 +53,8 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) break; /* Check if there's room for another VLAN filter. */ if (i == RTE_DIM(priv->vlan_filter)) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } if (i < priv->vlan_filter_n) { assert(priv->vlan_filter_n != 0); @@ -78,10 +77,10 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->vlan_filter[priv->vlan_filter_n] = vlan_id; ++priv->vlan_filter_n; } - if (dev->data->dev_started) - mlx5_traffic_restart(dev); out: - return ret; + if (dev->data->dev_started) + return mlx5_traffic_restart(dev); + return 0; } /** @@ -105,7 +104,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) uint16_t vlan_offloads = (on ? 
IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) | 0; - int err; + int ret; /* Validate hw support */ if (!priv->config.hw_vlan_strip) { @@ -129,10 +128,10 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING, .flags = vlan_offloads, }; - err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); - if (err) { + ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); + if (ret) { ERROR("%p: failed to modified stripping mode: %s", - (void *)dev, strerror(err)); + (void *)dev, strerror(rte_errno)); return; } /* Update related bits in RX queue. */ @@ -146,6 +145,9 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) * Pointer to Ethernet device structure. * @param mask * VLAN offload bit mask. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 00/10] net/mlx5: clean driver 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (9 preceding siblings ...) 2018-02-28 15:12 ` [PATCH v2 10/10] net/mlx5: standardize on negative errno values Nelio Laranjeiro @ 2018-03-05 12:20 ` Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro ` (10 more replies) 10 siblings, 11 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:20 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh - Removes unused SR-IOV flag. - Adds missing documentation on some functions. - Removes the spin-lock on the private structure. - Standardize the return values of all functions as discussed on the mailing list [1]. [1] https://dpdk.org/ml/archives/dev/2018-January/087991.html Changes in v2: - fix a segfault in Tx queue release. Nelio Laranjeiro (10): net/mlx5: fix sriov flag net/mlx5: name parameters in function prototypes net/mlx5: mark parameters with unused attribute net/mlx5: normalize function prototypes net/mlx5: add missing function documentation net/mlx5: remove useless empty lines net/mlx5: remove control path locks net/mlx5: prefix all function with mlx5 net/mlx5: change non failing function return values net/mlx5: standardize on negative errno values drivers/net/mlx5/mlx5.c | 236 ++++++-------- drivers/net/mlx5/mlx5.h | 240 ++++++-------- drivers/net/mlx5/mlx5_ethdev.c | 611 +++++++++++++++-------------------- drivers/net/mlx5/mlx5_flow.c | 664 ++++++++++++++++++++------------------- drivers/net/mlx5/mlx5_mac.c | 42 ++- drivers/net/mlx5/mlx5_mr.c | 130 ++++---- drivers/net/mlx5/mlx5_rss.c | 159 ++++------ drivers/net/mlx5/mlx5_rxmode.c | 28 +- drivers/net/mlx5/mlx5_rxq.c | 488 ++++++++++++++-------------- drivers/net/mlx5/mlx5_rxtx.c | 49 ++- drivers/net/mlx5/mlx5_rxtx.h | 161 +++++----- drivers/net/mlx5/mlx5_rxtx_vec.c | 25 +- drivers/net/mlx5/mlx5_socket.c | 115 ++++--- drivers/net/mlx5/mlx5_stats.c | 189 +++++------ 
drivers/net/mlx5/mlx5_trigger.c | 234 +++++++------- drivers/net/mlx5/mlx5_txq.c | 229 +++++++------- drivers/net/mlx5/mlx5_vlan.c | 93 ++---- 17 files changed, 1761 insertions(+), 1932 deletions(-) -- 2.11.0 ^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH v3 01/10] net/mlx5: fix sriov flag 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro @ 2018-03-05 12:20 ` Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 02/10] net/mlx5: name parameters in function prototypes Nelio Laranjeiro ` (9 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:20 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh priv_get_num_vfs() was used to help the PMD in prefetching the mbuf in datapath when the PMD was behaving in VF mode. This knowledge is no more used. Fixes: 528a9fbec6de ("net/mlx5: support ConnectX-5 devices") Cc: yskoh@mellanox.com Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 18 ++---------------- drivers/net/mlx5/mlx5.h | 2 -- drivers/net/mlx5/mlx5_ethdev.c | 37 ------------------------------------- 3 files changed, 2 insertions(+), 55 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 61cb93101..7e8a214ce 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -578,7 +578,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) int err = 0; struct ibv_context *attr_ctx = NULL; struct ibv_device_attr_ex device_attr; - unsigned int sriov; unsigned int mps; unsigned int cqe_comp; unsigned int tunnel_en = 0; @@ -625,18 +624,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) (pci_dev->addr.devid != pci_addr.devid) || (pci_dev->addr.function != pci_addr.function)) continue; - sriov = ((pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)); - INFO("PCI information matches, using device \"%s\"" - " (SR-IOV: %s)", - 
list[i]->name, - sriov ? "true" : "false"); + INFO("PCI information matches, using device \"%s\"", + list[i]->name); attr_ctx = mlx5_glue->open_device(list[i]); err = errno; break; @@ -709,7 +698,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct rte_eth_dev *eth_dev; struct ibv_device_attr_ex device_attr_ex; struct ether_addr mac; - uint16_t num_vfs = 0; struct ibv_device_attr_ex device_attr; struct mlx5_dev_config config = { .cqe_comp = cqe_comp, @@ -870,8 +858,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) DEBUG("hardware RX end alignment padding is %ssupported", (config.hw_padding ? "" : "not ")); - priv_get_num_vfs(priv, &num_vfs); - config.sriov = (num_vfs || sriov); config.tso = ((device_attr_ex.tso_caps.max_tso > 0) && (device_attr_ex.tso_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET))); diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 9ad0533fc..5e90d99cc 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -78,7 +78,6 @@ struct mlx5_dev_config { unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */ unsigned int hw_padding:1; /* End alignment padding is supported. */ - unsigned int sriov:1; /* This is a VF or PF with VF devices. */ unsigned int mps:2; /* Multi-packet send supported mode. */ unsigned int tunnel_en:1; /* Whether tunnel stateless offloads are supported. 
*/ @@ -209,7 +208,6 @@ struct priv *mlx5_get_priv(struct rte_eth_dev *dev); int mlx5_is_secondary(void); int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); int priv_ifreq(const struct priv *, int req, struct ifreq *); -int priv_get_num_vfs(struct priv *, uint16_t *); int priv_get_mtu(struct priv *, uint16_t *); int priv_set_flags(struct priv *, unsigned int, unsigned int); int mlx5_dev_configure(struct rte_eth_dev *); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index b73cb53df..f98fc4c3b 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -201,43 +201,6 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) } /** - * Return the number of active VFs for the current device. - * - * @param[in] priv - * Pointer to private structure. - * @param[out] num_vfs - * Number of active VFs. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -int -priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs) -{ - /* The sysfs entry name depends on the operating system. */ - const char **name = (const char *[]){ - "sriov_numvfs", - "mlx5_num_vfs", - NULL, - }; - - do { - int n; - FILE *file; - MKSTR(path, "%s/device/%s", priv->ibdev_path, *name); - - file = fopen(path, "rb"); - if (!file) - continue; - n = fscanf(file, "%" SCNu16, num_vfs); - fclose(file); - if (n == 1) - return 0; - } while (*(++name)); - return -1; -} - -/** * Get device MTU. * * @param priv -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 02/10] net/mlx5: name parameters in function prototypes 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro @ 2018-03-05 12:20 ` Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 03/10] net/mlx5: mark parameters with unused attribute Nelio Laranjeiro ` (8 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:20 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.h | 191 ++++++++++++++++++++++++------------------- drivers/net/mlx5/mlx5_rxtx.h | 162 ++++++++++++++++++++---------------- 2 files changed, 195 insertions(+), 158 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 5e90d99cc..b65962df9 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -206,113 +206,132 @@ int mlx5_getenv_int(const char *); struct priv *mlx5_get_priv(struct rte_eth_dev *dev); int mlx5_is_secondary(void); -int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); -int priv_ifreq(const struct priv *, int req, struct ifreq *); -int priv_get_mtu(struct priv *, uint16_t *); -int priv_set_flags(struct priv *, unsigned int, unsigned int); -int mlx5_dev_configure(struct rte_eth_dev *); -void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *); +int priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]); +int priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr); +int priv_get_mtu(struct priv *priv, uint16_t *mtu); +int priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags); +int mlx5_dev_configure(struct rte_eth_dev *dev); +void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info); const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev 
*dev); -int priv_link_update(struct priv *, int); -int priv_force_link_status_change(struct priv *, int); -int mlx5_link_update(struct rte_eth_dev *, int); -int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t); -int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_ibv_device_to_pci_addr(const struct ibv_device *, - struct rte_pci_addr *); -void mlx5_dev_link_status_handler(void *); -void mlx5_dev_interrupt_handler(void *); -void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *); -void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); +int priv_link_update(struct priv *priv, int wait_to_complete); +int priv_force_link_status_change(struct priv *priv, int status); +int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); +int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, + struct rte_pci_addr *pci_addr); +void mlx5_dev_link_status_handler(void *arg); +void mlx5_dev_interrupt_handler(void *cb_arg); +void priv_dev_interrupt_handler_uninstall(struct priv *priv, + struct rte_eth_dev *dev); +void priv_dev_interrupt_handler_install(struct priv *priv, + struct rte_eth_dev *dev); int mlx5_set_link_down(struct rte_eth_dev *dev); int mlx5_set_link_up(struct rte_eth_dev *dev); +eth_tx_burst_t priv_select_tx_function(struct priv *priv, + struct rte_eth_dev *dev); +eth_rx_burst_t priv_select_rx_function(struct priv *priv, + struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); -eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *); -eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *); /* mlx5_mac.c */ 
-int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]); -void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t); -int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t, - uint32_t); -void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *); +int priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]); +void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index, uint32_t vmdq); +void mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); /* mlx5_rss.c */ -int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int priv_rss_reta_index_resize(struct priv *, unsigned int); -int mlx5_dev_rss_reta_query(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); -int mlx5_dev_rss_reta_update(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); +int mlx5_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size); +int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); /* mlx5_rxmode.c */ -void mlx5_promiscuous_enable(struct rte_eth_dev *); -void mlx5_promiscuous_disable(struct rte_eth_dev *); -void mlx5_allmulticast_enable(struct rte_eth_dev *); -void mlx5_allmulticast_disable(struct rte_eth_dev *); +void mlx5_promiscuous_enable(struct rte_eth_dev *dev); +void mlx5_promiscuous_disable(struct rte_eth_dev *dev); +void mlx5_allmulticast_enable(struct rte_eth_dev *dev); +void mlx5_allmulticast_disable(struct rte_eth_dev 
*dev); /* mlx5_stats.c */ -void priv_xstats_init(struct priv *); -int mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *); -void mlx5_stats_reset(struct rte_eth_dev *); -int mlx5_xstats_get(struct rte_eth_dev *, - struct rte_eth_xstat *, unsigned int); -void mlx5_xstats_reset(struct rte_eth_dev *); -int mlx5_xstats_get_names(struct rte_eth_dev *, - struct rte_eth_xstat_name *, unsigned int); +void priv_xstats_init(struct priv *priv); +int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +void mlx5_stats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, unsigned int n); +void mlx5_xstats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int n); /* mlx5_vlan.c */ -int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int); -int mlx5_vlan_offload_set(struct rte_eth_dev *, int); -void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int); +int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); /* mlx5_trigger.c */ -int mlx5_dev_start(struct rte_eth_dev *); -void mlx5_dev_stop(struct rte_eth_dev *); -int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *); -int mlx5_traffic_restart(struct rte_eth_dev *); +int mlx5_dev_start(struct rte_eth_dev *dev); +void mlx5_dev_stop(struct rte_eth_dev *dev); +int priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev); +int priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev); +int priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev); +int mlx5_traffic_restart(struct rte_eth_dev *dev); /* 
mlx5_flow.c */ -int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type, - enum rte_filter_op, void *); -int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -struct rte_flow *mlx5_flow_create(struct rte_eth_dev *, - const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *, - struct rte_flow_error *); -void priv_flow_flush(struct priv *, struct mlx5_flows *); -int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *); -int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *, - enum rte_flow_action_type, void *, - struct rte_flow_error *); -int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *); -int priv_flow_start(struct priv *, struct mlx5_flows *); -void priv_flow_stop(struct priv *, struct mlx5_flows *); -int priv_flow_verify(struct priv *); -int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *, struct rte_flow_item_vlan *, - struct rte_flow_item_vlan *); -int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *); -int priv_flow_create_drop_queue(struct priv *); -void priv_flow_delete_drop_queue(struct priv *); +int mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +void priv_flow_flush(struct priv *priv, struct mlx5_flows *list); +int priv_flow_create_drop_queue(struct priv *priv); +void priv_flow_stop(struct priv *priv, struct mlx5_flows *list); +int priv_flow_start(struct priv *priv, struct mlx5_flows *list); +int priv_flow_verify(struct priv *priv); +int priv_flow_create_drop_queue(struct priv *priv); +void 
priv_flow_delete_drop_queue(struct priv *priv); +int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask); +int mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask); +struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error); +int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + enum rte_flow_action_type action, void *data, + struct rte_flow_error *error); +int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, + struct rte_flow_error *error); +int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); /* mlx5_socket.c */ @@ -323,9 +342,9 @@ int priv_socket_connect(struct priv *priv); /* mlx5_mr.c */ -struct mlx5_mr *priv_mr_new(struct priv *, struct rte_mempool *); -struct mlx5_mr *priv_mr_get(struct priv *, struct rte_mempool *); -int priv_mr_release(struct priv *, struct mlx5_mr *); -int priv_mr_verify(struct priv *); +struct mlx5_mr *priv_mr_new(struct priv *priv, struct rte_mempool *mp); +struct mlx5_mr *priv_mr_get(struct priv *priv, struct rte_mempool *mp); +int priv_mr_release(struct priv *priv, struct mlx5_mr *mr); +int priv_mr_verify(struct priv *priv); #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index d7e890558..d0ec9a214 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -210,97 +210,115 @@ struct mlx5_txq_ctrl { extern uint8_t 
rss_hash_default_key[]; extern const size_t rss_hash_default_key_len; -void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *); -int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_rxconf *, struct rte_mempool *); -void mlx5_rx_queue_release(void *); +void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl); +int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +void mlx5_rx_queue_release(void *dpdk_rxq); int priv_rx_intr_vec_enable(struct priv *priv); void priv_rx_intr_vec_disable(struct priv *priv); int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_verify(struct priv *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_rxconf *, - struct rte_mempool *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t); -int mlx5_priv_rxq_release(struct priv *, uint16_t); -int mlx5_priv_rxq_releasable(struct priv *, uint16_t); -int mlx5_priv_rxq_verify(struct priv *); -int rxq_alloc_elts(struct mlx5_rxq_ctrl *); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *, - uint16_t [], - uint16_t); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *, - uint16_t [], - uint16_t); -int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *); -int mlx5_priv_ind_table_ibv_verify(struct priv *); -struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -struct 
mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *); -int mlx5_priv_hrxq_ibv_verify(struct priv *); -uint64_t mlx5_priv_get_rx_port_offloads(struct priv *); -uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *); +struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx); +struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_priv_rxq_ibv_releasable(struct priv *priv, + struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_priv_rxq_ibv_verify(struct priv *priv); +struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, + uint16_t desc, + unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_release(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx); +int mlx5_priv_rxq_verify(struct priv *priv); +int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl); +struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *priv, + uint16_t queues[], + uint16_t queues_n); +struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *priv, + uint16_t queues[], + uint16_t queues_n); +int mlx5_priv_ind_table_ibv_release(struct priv *priv, + struct mlx5_ind_table_ibv *ind_tbl); +int mlx5_priv_ind_table_ibv_verify(struct priv *priv); +struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +int mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq); +int mlx5_priv_hrxq_ibv_verify(struct 
priv *priv); +uint64_t mlx5_priv_get_rx_port_offloads(struct priv *priv); +uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *priv); /* mlx5_txq.c */ -int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_txconf *); -void mlx5_tx_queue_release(void *); +int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); +void mlx5_tx_queue_release(void *dpdk_txq); int priv_tx_uar_remap(struct priv *priv, int fd); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_verify(struct priv *); -struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_txconf *); -struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t); -int mlx5_priv_txq_release(struct priv *, uint16_t); -int mlx5_priv_txq_releasable(struct priv *, uint16_t); -int mlx5_priv_txq_verify(struct priv *); -void txq_alloc_elts(struct mlx5_txq_ctrl *); -uint64_t mlx5_priv_get_tx_port_offloads(struct priv *); +struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx); +struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx); +int mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv); +int mlx5_priv_txq_ibv_releasable(struct priv *priv, + struct mlx5_txq_ibv *txq_ibv); +int mlx5_priv_txq_ibv_verify(struct priv *priv); +struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *priv, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *priv, uint16_t idx); +int mlx5_priv_txq_release(struct priv *priv, uint16_t idx); +int 
mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx); +int mlx5_priv_txq_verify(struct priv *priv); +void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl); +uint64_t mlx5_priv_get_tx_port_offloads(struct priv *priv); /* mlx5_rxtx.c */ extern uint32_t mlx5_ptype_table[]; void mlx5_set_ptype_table(void); -uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); -int mlx5_rx_descriptor_status(void *, uint16_t); -int mlx5_tx_descriptor_status(void *, uint16_t); +uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); +uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset); +int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset); /* Vectorized version of mlx5_rxtx.c */ -int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *); -int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *); -int rxq_check_vec_support(struct mlx5_rxq_data *); -int priv_check_vec_rx_support(struct priv *); -uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t); -uint16_t 
mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t); + +int priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); +int priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); +int rxq_check_vec_support(struct mlx5_rxq_data *rxq); +int priv_check_vec_rx_support(struct priv *priv); +uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); /* mlx5_mr.c */ -void mlx5_mp2mr_iter(struct rte_mempool *, void *); -struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *, - struct rte_mempool *, unsigned int); -struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *, - unsigned int); +void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg); +struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, + struct rte_mempool *mp, unsigned int idx); +struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, + struct rte_mempool *mp, + unsigned int idx); #ifndef NDEBUG /** -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 03/10] net/mlx5: mark parameters with unused attribute 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 02/10] net/mlx5: name parameters in function prototypes Nelio Laranjeiro @ 2018-03-05 12:20 ` Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 04/10] net/mlx5: normalize function prototypes Nelio Laranjeiro ` (7 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:20 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Replaces all (void)foo; by __rte_unused macro except when variables are under #if statements. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 4 ++-- drivers/net/mlx5/mlx5_ethdev.c | 18 +++++---------- drivers/net/mlx5/mlx5_flow.c | 25 ++++++++------------ drivers/net/mlx5/mlx5_mac.c | 3 +-- drivers/net/mlx5/mlx5_mr.c | 10 +++----- drivers/net/mlx5/mlx5_rxq.c | 4 ++-- drivers/net/mlx5/mlx5_rxtx.c | 51 +++++++++++++++++------------------------ drivers/net/mlx5/mlx5_stats.c | 2 +- drivers/net/mlx5/mlx5_trigger.c | 4 ++-- drivers/net/mlx5/mlx5_txq.c | 19 +++++++-------- 10 files changed, 55 insertions(+), 85 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 7e8a214ce..cdf99b5ad 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -571,7 +571,8 @@ priv_uar_init_secondary(struct priv *priv) * 0 on success, negative errno value on failure. 
*/ static int -mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) { struct ibv_device **list; struct ibv_device *ibv_dev; @@ -588,7 +589,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct ibv_counter_set_description cs_desc; #endif - (void)pci_drv; assert(pci_drv == &mlx5_driver); /* Get mlx5_dev[] index. */ idx = mlx5_dev_idx(&pci_dev->addr); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index f98fc4c3b..0c383deba 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -467,11 +467,9 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). */ static int -mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct ethtool_cmd edata = { @@ -483,7 +481,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) /* priv_lock() is not taken to allow concurrent calls. */ - (void)wait_to_complete; if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -533,11 +530,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). 
*/ static int -mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; @@ -545,7 +540,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) struct rte_eth_link dev_link; uint64_t sc; - (void)wait_to_complete; if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -675,7 +669,7 @@ priv_link_stop(struct priv *priv) * Wait for request completion (ignored). */ int -priv_link_update(struct priv *priv, int wait_to_complete) +priv_link_update(struct priv *priv, int wait_to_complete __rte_unused) { struct rte_eth_dev *dev = priv->dev; struct utsname utsname; @@ -687,9 +681,9 @@ priv_link_update(struct priv *priv, int wait_to_complete) sscanf(utsname.release, "%d.%d.%d", &ver[0], &ver[1], &ver[2]) != 3 || KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) - ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete); + ret = mlx5_link_update_unlocked_gset(dev); else - ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete); + ret = mlx5_link_update_unlocked_gs(dev); /* If lsc interrupt is disabled, should always be ready for traffic. */ if (!dev->data->dev_conf.intr_conf.lsc) { priv_link_start(priv); @@ -741,7 +735,7 @@ priv_force_link_status_change(struct priv *priv, int status) * Wait for request completion (ignored). 
*/ int -mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) { struct priv *priv = dev->data->dev_private; int ret; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 42381c578..bb98fb4c5 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -526,7 +526,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * 0 on success, errno value on failure. */ static int -priv_flow_convert_rss_conf(struct priv *priv, +priv_flow_convert_rss_conf(struct priv *priv __rte_unused, struct mlx5_flow_parse *parser, const struct rte_eth_rss_conf *rss_conf) { @@ -535,7 +535,6 @@ priv_flow_convert_rss_conf(struct priv *priv, * priv_flow_convert_actions() to initialize the parser with the * device default RSS configuration. */ - (void)priv; if (rss_conf) { if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) return EINVAL; @@ -568,13 +567,11 @@ priv_flow_convert_rss_conf(struct priv *priv, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_attributes(struct priv *priv, +priv_flow_convert_attributes(struct priv *priv __rte_unused, const struct rte_flow_attr *attr, struct rte_flow_error *error, - struct mlx5_flow_parse *parser) + struct mlx5_flow_parse *parser __rte_unused) { - (void)priv; - (void)parser; if (attr->group) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -779,7 +776,7 @@ priv_flow_convert_actions(struct priv *priv, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -priv_flow_convert_items_validate(struct priv *priv, +priv_flow_convert_items_validate(struct priv *priv __rte_unused, const struct rte_flow_item items[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) @@ -787,7 +784,6 @@ priv_flow_convert_items_validate(struct priv *priv, const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; - (void)priv; /* Initialise the offsets to start after verbs attribute. */ for (i = 0; i != hash_rxq_init_n; ++i) parser->queue[i].offset = sizeof(struct ibv_flow_attr); @@ -871,14 +867,13 @@ priv_flow_convert_items_validate(struct priv *priv, * A verbs flow attribute on success, NULL otherwise. */ static struct ibv_flow_attr* -priv_flow_convert_allocate(struct priv *priv, +priv_flow_convert_allocate(struct priv *priv __rte_unused, unsigned int priority, unsigned int size, struct rte_flow_error *error) { struct ibv_flow_attr *ibv_attr; - (void)priv; ibv_attr = rte_calloc(__func__, 1, size, 0); if (!ibv_attr) { rte_flow_error_set(error, ENOMEM, @@ -900,7 +895,8 @@ priv_flow_convert_allocate(struct priv *priv, * Internal parser structure. */ static void -priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser) +priv_flow_convert_finalise(struct priv *priv __rte_unused, + struct mlx5_flow_parse *parser) { const unsigned int ipv4 = hash_rxq_init[parser->layer].ip_version == MLX5_IPV4; @@ -911,7 +907,6 @@ priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser) const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; unsigned int i; - (void)priv; /* Remove any other flow not matching the pattern. 
*/ if (parser->queues_n == 1) { for (i = 0; i != hash_rxq_init_n; ++i) { @@ -2424,11 +2419,10 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; priv_lock(priv); priv_flow_destroy(priv, &priv->flows, flow); priv_unlock(priv); @@ -2443,11 +2437,10 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, */ int mlx5_flow_flush(struct rte_eth_dev *dev, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; priv_lock(priv); priv_flow_flush(priv, &priv->flows); priv_unlock(priv); diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index e8a8d4594..a529dfeac 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -88,12 +88,11 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) */ int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, - uint32_t index, uint32_t vmdq) + uint32_t index, uint32_t vmdq __rte_unused) { unsigned int i; int ret = 0; - (void)vmdq; assert(index < MLX5_MAX_MAC_ADDRESSES); /* First, make sure this address isn't already configured. */ for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) { diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 857dfcd83..38a8e2f40 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -26,15 +26,12 @@ struct mlx5_check_mempool_data { /* Called by mlx5_check_mempool() when iterating the memory chunks. 
*/ static void -mlx5_check_mempool_cb(struct rte_mempool *mp, +mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused, void *opaque, struct rte_mempool_memhdr *memhdr, - unsigned int mem_idx) + unsigned int mem_idx __rte_unused) { struct mlx5_check_mempool_data *data = opaque; - (void)mp; - (void)mem_idx; - /* It already failed, skip the next chunks. */ if (data->ret != 0) return; @@ -336,9 +333,8 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp) * 0 on success, errno on failure. */ int -priv_mr_release(struct priv *priv, struct mlx5_mr *mr) +priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) { - (void)priv; assert(mr); DEBUG("Memory Region %p refcnt: %d", (void *)mr, rte_atomic32_read(&mr->refcnt)); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 238fa7e56..8b9cc1dd0 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -910,9 +910,9 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv) * Verbs Rx queue object. */ int -mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, + struct mlx5_rxq_ibv *rxq_ibv) { - (void)priv; assert(rxq_ibv); return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); } diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 049f7e6c1..93d794ede 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -1899,11 +1899,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * Number of packets successfully transmitted (<= pkts_n). 
*/ uint16_t -removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +removed_tx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } @@ -1924,11 +1923,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) * Number of packets successfully received (<= pkts_n). */ uint16_t -removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +removed_rx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_rxq; - (void)pkts; - (void)pkts_n; return 0; } @@ -1940,58 +1938,51 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) */ uint16_t __attribute__((weak)) -mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } uint16_t __attribute__((weak)) -mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_tx_burst_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } uint16_t __attribute__((weak)) -mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_rxq; - (void)pkts; - (void)pkts_n; return 0; } int __attribute__((weak)) -priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +priv_check_raw_vec_tx_support(struct priv *priv __rte_unused, + struct rte_eth_dev *dev __rte_unused) { - (void)priv; - (void)dev; return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_tx_support(struct priv *priv, struct 
rte_eth_dev *dev) +priv_check_vec_tx_support(struct priv *priv __rte_unused, + struct rte_eth_dev *dev __rte_unused) { - (void)priv; - (void)dev; return -ENOTSUP; } int __attribute__((weak)) -rxq_check_vec_support(struct mlx5_rxq_data *rxq) +rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) { - (void)rxq; return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_rx_support(struct priv *priv) +priv_check_vec_rx_support(struct priv *priv __rte_unused) { - (void)priv; return -ENOTSUP; } diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index eb9c65dcc..167e40548 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -488,7 +488,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) * Number of xstats names. */ int -mlx5_xstats_get_names(struct rte_eth_dev *dev, +mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, struct rte_eth_xstat_name *xstats_names, unsigned int n) { struct priv *priv = dev->data->dev_private; diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index f5711a998..72e8ff644 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -340,9 +340,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) * 0 on success. */ int -priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev) +priv_dev_traffic_disable(struct priv *priv, + struct rte_eth_dev *dev __rte_unused) { - (void)dev; priv_flow_flush(priv, &priv->ctrl_flows); return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index ed1c713ea..071d88a1f 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -578,9 +578,9 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) * 0 on success, errno on failure. 
*/ int -mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused, + struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); @@ -603,9 +603,9 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) * Verbs Tx queue object. */ int -mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, + struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); return (rte_atomic32_read(&txq_ibv->refcnt) == 1); } @@ -806,13 +806,10 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) mlx5_priv_txq_ibv_get(priv, idx); for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { - struct mlx5_mr *mr = NULL; - - (void)mr; - if (ctrl->txq.mp2mr[i]) { - mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp); - assert(mr); - } + if (ctrl->txq.mp2mr[i]) + claim_nonzero + (priv_mr_get(priv, + ctrl->txq.mp2mr[i]->mp)); } rte_atomic32_inc(&ctrl->refcnt); DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 04/10] net/mlx5: normalize function prototypes 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (2 preceding siblings ...) 2018-03-05 12:20 ` [PATCH v3 03/10] net/mlx5: mark parameters with unused attribute Nelio Laranjeiro @ 2018-03-05 12:21 ` Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 05/10] net/mlx5: add missing function documentation Nelio Laranjeiro ` (6 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:21 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5_flow.c | 2 +- drivers/net/mlx5/mlx5_mr.c | 11 ++++++----- drivers/net/mlx5/mlx5_rxq.c | 16 ++++++++-------- drivers/net/mlx5/mlx5_txq.c | 8 ++++---- 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index bb98fb4c5..d8d124749 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -866,7 +866,7 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused, * @return * A verbs flow attribute on success, NULL otherwise. */ -static struct ibv_flow_attr* +static struct ibv_flow_attr * priv_flow_convert_allocate(struct priv *priv __rte_unused, unsigned int priority, unsigned int size, diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 38a8e2f40..4e1495800 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -66,8 +66,9 @@ mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused, * @return * 0 on success (mempool is virtually contiguous), -1 on error. 
*/ -static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, - uintptr_t *end) +static int +mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, + uintptr_t *end) { struct mlx5_check_mempool_data data; @@ -97,7 +98,7 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * @return * mr on success, NULL on failure. */ -struct mlx5_mr* +struct mlx5_mr * priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, struct rte_mempool *mp, unsigned int idx) { @@ -244,7 +245,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * @return * The memory region on success. */ -struct mlx5_mr* +struct mlx5_mr * priv_mr_new(struct priv *priv, struct rte_mempool *mp) { const struct rte_memseg *ms = rte_eal_get_physmem_layout(); @@ -304,7 +305,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) * @return * The memory region on success. */ -struct mlx5_mr* +struct mlx5_mr * priv_mr_get(struct priv *priv, struct rte_mempool *mp) { struct mlx5_mr *mr; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 8b9cc1dd0..2fc6e08aa 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -601,7 +601,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * @return * The Verbs object initialised if it can be created. */ -struct mlx5_rxq_ibv* +struct mlx5_rxq_ibv * mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) { struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; @@ -819,7 +819,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) * @return * The Verbs object if it exists. */ -struct mlx5_rxq_ibv* +struct mlx5_rxq_ibv * mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) { struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; @@ -932,7 +932,7 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, * @return * A DPDK queue object on success. 
*/ -struct mlx5_rxq_ctrl* +struct mlx5_rxq_ctrl * mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp) @@ -1057,7 +1057,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, * @return * A pointer to the queue if it exists. */ -struct mlx5_rxq_ctrl* +struct mlx5_rxq_ctrl * mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) { struct mlx5_rxq_ctrl *rxq_ctrl = NULL; @@ -1170,7 +1170,7 @@ mlx5_priv_rxq_verify(struct priv *priv) * @return * A new indirection table. */ -struct mlx5_ind_table_ibv* +struct mlx5_ind_table_ibv * mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], uint16_t queues_n) { @@ -1232,7 +1232,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], * @return * An indirection table if found. */ -struct mlx5_ind_table_ibv* +struct mlx5_ind_table_ibv * mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], uint16_t queues_n) { @@ -1331,7 +1331,7 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) * @return * An hash Rx queue on success. */ -struct mlx5_hrxq* +struct mlx5_hrxq * mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { @@ -1400,7 +1400,7 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, * @return * An hash Rx queue on success. */ -struct mlx5_hrxq* +struct mlx5_hrxq * mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 071d88a1f..1a508a488 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -369,7 +369,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) * @return * The Verbs object initialised if it can be created. 
*/ -struct mlx5_txq_ibv* +struct mlx5_txq_ibv * mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) { struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; @@ -547,7 +547,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) * @return * The Verbs object if it exists. */ -struct mlx5_txq_ibv* +struct mlx5_txq_ibv * mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) { struct mlx5_txq_ctrl *txq_ctrl; @@ -749,7 +749,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * @return * A DPDK queue object on success. */ -struct mlx5_txq_ctrl* +struct mlx5_txq_ctrl * mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_txconf *conf) @@ -794,7 +794,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, * @return * A pointer to the queue if it exists. */ -struct mlx5_txq_ctrl* +struct mlx5_txq_ctrl * mlx5_priv_txq_get(struct priv *priv, uint16_t idx) { struct mlx5_txq_ctrl *ctrl = NULL; -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 05/10] net/mlx5: add missing function documentation 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (3 preceding siblings ...) 2018-03-05 12:21 ` [PATCH v3 04/10] net/mlx5: normalize function prototypes Nelio Laranjeiro @ 2018-03-05 12:21 ` Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 06/10] net/mlx5: remove useless empty lines Nelio Laranjeiro ` (5 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:21 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5_ethdev.c | 18 ++++++++++++++++++ drivers/net/mlx5/mlx5_mr.c | 7 +++++-- drivers/net/mlx5/mlx5_rxq.c | 20 ++++++++++++-------- drivers/net/mlx5/mlx5_trigger.c | 30 ++++++++++++++++++++++++++++++ drivers/net/mlx5/mlx5_txq.c | 10 ++++++---- 5 files changed, 71 insertions(+), 14 deletions(-) diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 0c383deba..9bbf1eb7d 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -435,6 +435,15 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) priv_unlock(priv); } +/** + * Get supported packet types. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * A pointer to the supported Packet types array. + */ const uint32_t * mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) { @@ -467,6 +476,9 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. + * + * @return + * 0 on success, -1 on error. */ static int mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) @@ -530,6 +542,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. + * + * @return + * 0 on success, -1 on error. 
*/ static int mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) @@ -733,6 +748,9 @@ priv_force_link_status_change(struct priv *priv, int status) * Pointer to Ethernet device structure. * @param wait_to_complete * Wait for request completion (ignored). + * + * @return + * 0 on success, -1 on error. */ int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 4e1495800..8748ddcf5 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -242,6 +242,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * Pointer to private structure. * @param mp * Pointer to the memory pool to register. + * * @return * The memory region on success. */ @@ -302,6 +303,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) * Pointer to private structure. * @param mp * Pointer to the memory pool to register. + * * @return * The memory region on success. */ @@ -352,9 +354,10 @@ priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) * Verify the flow list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int priv_mr_verify(struct priv *priv) diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 2fc6e08aa..6924202cc 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -883,9 +883,10 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) * Verify the Verbs Rx queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. 
*/ int mlx5_priv_rxq_ibv_verify(struct priv *priv) @@ -1139,9 +1140,10 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) * Verify the Rx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_rxq_verify(struct priv *priv) @@ -1293,9 +1295,10 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv, * Verify the Rx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_ind_table_ibv_verify(struct priv *priv) @@ -1462,9 +1465,10 @@ mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) * Verify the Rx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_hrxq_ibv_verify(struct priv *priv) diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 72e8ff644..b147fb4f8 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -14,6 +14,12 @@ #include "mlx5_rxtx.h" #include "mlx5_utils.h" +/** + * Stop traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ static void priv_txq_stop(struct priv *priv) { @@ -23,6 +29,15 @@ priv_txq_stop(struct priv *priv) mlx5_priv_txq_release(priv, i); } +/** + * Start traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno on error. + */ static int priv_txq_start(struct priv *priv) { @@ -58,6 +73,12 @@ priv_txq_start(struct priv *priv) return ret; } +/** + * Stop traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. 
+ */ static void priv_rxq_stop(struct priv *priv) { @@ -67,6 +88,15 @@ priv_rxq_stop(struct priv *priv) mlx5_priv_rxq_release(priv, i); } +/** + * Start traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno on error. + */ static int priv_rxq_start(struct priv *priv) { diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 1a508a488..9be707840 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -614,9 +614,10 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, * Verify the Verbs Tx queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_txq_ibv_verify(struct priv *priv) @@ -893,9 +894,10 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) * Verify the Tx Queue list is empty * * @param priv - * Pointer to private structure. + * Pointer to private structure. * - * @return the number of object not released. + * @return + * The number of object not released. */ int mlx5_priv_txq_verify(struct priv *priv) -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 06/10] net/mlx5: remove useless empty lines 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (4 preceding siblings ...) 2018-03-05 12:21 ` [PATCH v3 05/10] net/mlx5: add missing function documentation Nelio Laranjeiro @ 2018-03-05 12:21 ` Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 07/10] net/mlx5: remove control path locks Nelio Laranjeiro ` (4 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:21 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Some empty lines have been added in the middle of the code without any reason. This commit removes them. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 22 ---------------------- drivers/net/mlx5/mlx5_ethdev.c | 7 ------- drivers/net/mlx5/mlx5_mr.c | 1 - drivers/net/mlx5/mlx5_rss.c | 2 -- drivers/net/mlx5/mlx5_rxq.c | 1 - drivers/net/mlx5/mlx5_vlan.c | 6 ------ 6 files changed, 39 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index cdf99b5ad..91149ccee 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -597,7 +597,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return -ENOMEM; } DEBUG("using driver device index %d", idx); - /* Save PCI address. 
*/ mlx5_dev[idx].pci_addr = pci_dev->addr; list = mlx5_glue->get_device_list(&i); @@ -644,7 +643,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return -err; } ibv_dev = list[i]; - DEBUG("device opened"); /* * Multi-packet send is supported by ConnectX-4 Lx PF as well @@ -685,7 +683,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) goto error; INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt); - for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { char name[RTE_ETH_NAME_MAX_LEN]; int len; @@ -716,9 +713,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, pci_dev->addr.devid, pci_dev->addr.function); if (device_attr.orig_attr.phys_port_cnt > 1) snprintf(name + len, sizeof(name), " port %u", i); - mlx5_dev[idx].ports |= test; - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { eth_dev = rte_eth_dev_attach_secondary(name); if (eth_dev == NULL) { @@ -755,15 +750,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, priv_select_tx_function(priv, eth_dev); continue; } - DEBUG("using port %u (%08" PRIx32 ")", port, test); - ctx = mlx5_glue->open_device(ibv_dev); if (ctx == NULL) { err = ENODEV; goto port_error; } - mlx5_glue->query_device_ex(ctx, NULL, &device_attr); /* Check port status. */ err = mlx5_glue->query_port(ctx, port, &port_attr); @@ -771,19 +763,16 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, ERROR("port query failed: %s", strerror(err)); goto port_error; } - if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { ERROR("port %d is not configured in Ethernet mode", port); err = EINVAL; goto port_error; } - if (port_attr.state != IBV_PORT_ACTIVE) DEBUG("port %d is not active: \"%s\" (%d)", port, mlx5_glue->port_state_str(port_attr.state), port_attr.state); - /* Allocate protection domain. 
*/ pd = mlx5_glue->alloc_pd(ctx); if (pd == NULL) { @@ -791,9 +780,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, err = ENOMEM; goto port_error; } - mlx5_dev[idx].ports |= test; - /* from rte_ethdev.c */ priv = rte_zmalloc("ethdev private structure", sizeof(*priv), @@ -803,7 +790,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, err = ENOMEM; goto port_error; } - priv->ctx = ctx; strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path, sizeof(priv->ibdev_path)); @@ -821,7 +807,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, ERROR("ibv_query_device_ex() failed"); goto port_error; } - config.hw_csum = !!(device_attr_ex.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM); DEBUG("checksum offloading is %ssupported", @@ -857,7 +842,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, #endif DEBUG("hardware RX end alignment padding is %ssupported", (config.hw_padding ? "" : "not ")); - config.tso = ((device_attr_ex.tso_caps.max_tso > 0) && (device_attr_ex.tso_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET))); @@ -906,7 +890,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, /* Get actual MTU if possible. */ priv_get_mtu(priv, &priv->mtu); DEBUG("port %u MTU is %u", priv->port, priv->mtu); - eth_dev = rte_eth_dev_allocate(name); if (eth_dev == NULL) { ERROR("can not allocate rte ethdev"); @@ -929,7 +912,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); TAILQ_INIT(&priv->flows); TAILQ_INIT(&priv->ctrl_flows); - /* Hint libmlx5 to use PMD allocator for data plane resources */ struct mlx5dv_ctx_allocators alctr = { .alloc = &mlx5_alloc_verbs_buf, @@ -946,7 +928,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, /* Store device configuration on private structure. 
*/ priv->config = config; continue; - port_error: if (priv) rte_free(priv); @@ -956,20 +937,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, claim_zero(mlx5_glue->close_device(ctx)); break; } - /* * XXX if something went wrong in the loop above, there is a resource * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as * long as the dpdk does not provide a way to deallocate a ethdev and a * way to enumerate the registered ethdevs to free the previous ones. */ - /* no port found, complain */ if (!mlx5_dev[idx].ports) { err = ENODEV; goto error; } - error: if (attr_ctx) claim_zero(mlx5_glue->close_device(attr_ctx)); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 9bbf1eb7d..5c43755d0 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -403,7 +403,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) char ifname[IF_NAMESIZE]; info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - priv_lock(priv); /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; @@ -492,7 +491,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) int link_speed = 0; /* priv_lock() is not taken to allow concurrent calls. 
*/ - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -838,7 +836,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) strerror(ret)); goto out; } - fc_conf->autoneg = ethpause.autoneg; if (ethpause.rx_pause && ethpause.tx_pause) fc_conf->mode = RTE_FC_FULL; @@ -849,7 +846,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) else fc_conf->mode = RTE_FC_NONE; ret = 0; - out: priv_unlock(priv); assert(ret >= 0); @@ -890,7 +886,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; @@ -900,7 +895,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) goto out; } ret = 0; - out: priv_unlock(priv); assert(ret >= 0); @@ -1155,7 +1149,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - rc = priv_socket_init(priv); if (!rc && priv->primary_socket) { priv->intr_handle_socket.fd = priv->primary_socket; diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 8748ddcf5..6624d0ffa 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -76,7 +76,6 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data); *start = (uintptr_t)data.start; *end = (uintptr_t)data.end; - return data.ret; } diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index d06b0bee1..8f5c8beff 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -123,7 +123,6 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) return ENOMEM; priv->reta_idx = mem; priv->reta_idx_n = reta_size; - if (old_size < reta_size) 
memset(&(*priv->reta_idx)[old_size], 0, (reta_size - old_size) * @@ -191,7 +190,6 @@ priv_dev_rss_reta_update(struct priv *priv, ret = priv_rss_reta_index_resize(priv, reta_size); if (ret) return ret; - for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; pos = i % RTE_RETA_GROUP_SIZE; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 6924202cc..320a12be9 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1067,7 +1067,6 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_get(priv, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 75c345626..85ed546cb 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -120,14 +120,12 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING, .flags = vlan_offloads, }; - err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); if (err) { ERROR("%p: failed to modified stripping mode: %s", (void *)priv, strerror(err)); return; } - /* Update related bits in RX queue. */ rxq->vlan_strip = !!on; } @@ -152,13 +150,11 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ERROR("VLAN stripping is not supported"); return; } - /* Validate queue number */ if (queue >= priv->rxqs_n) { ERROR("VLAN stripping, invalid queue number %d", queue); return; } - priv_lock(priv); priv_vlan_strip_queue_set(priv, queue, on); priv_unlock(priv); @@ -186,13 +182,11 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) ERROR("VLAN stripping is not supported"); return 0; } - /* Run on every RX queue and set/reset VLAN stripping. 
*/ priv_lock(priv); for (i = 0; (i != priv->rxqs_n); i++) priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); priv_unlock(priv); } - return 0; } -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 07/10] net/mlx5: remove control path locks 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (5 preceding siblings ...) 2018-03-05 12:21 ` [PATCH v3 06/10] net/mlx5: remove useless empty lines Nelio Laranjeiro @ 2018-03-05 12:21 ` Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 08/10] net/mlx5: prefix all function with mlx5 Nelio Laranjeiro ` (3 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:21 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh In priv struct only the memory region needs to be protected against concurrent access between the control plane and the data plane. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 2 -- drivers/net/mlx5/mlx5.h | 43 +----------------------------- drivers/net/mlx5/mlx5_ethdev.c | 58 +++-------------------------------------- drivers/net/mlx5/mlx5_flow.c | 18 +------------ drivers/net/mlx5/mlx5_mr.c | 4 +-- drivers/net/mlx5/mlx5_rss.c | 8 ------ drivers/net/mlx5/mlx5_rxq.c | 9 ------- drivers/net/mlx5/mlx5_stats.c | 15 +---------- drivers/net/mlx5/mlx5_trigger.c | 7 ----- drivers/net/mlx5/mlx5_txq.c | 5 ---- drivers/net/mlx5/mlx5_vlan.c | 6 ----- 11 files changed, 9 insertions(+), 166 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 91149ccee..872edab9d 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -165,7 +165,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) unsigned int i; int ret; - priv_lock(priv); DEBUG("%p: closing device \"%s\"", (void *)dev, ((priv->ctx != NULL) ? 
priv->ctx->device->name : "")); @@ -227,7 +226,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) ret = priv_mr_verify(priv); if (ret) WARN("%p: some Memory Region still remain", (void *)priv); - priv_unlock(priv); memset(priv, 0, sizeof(*priv)); } diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index b65962df9..8e021544c 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -148,7 +148,7 @@ struct priv { LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls; uint32_t link_speed_capa; /* Link speed capabilities. */ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ - rte_spinlock_t lock; /* Lock for control functions. */ + rte_spinlock_t mr_lock; /* MR Lock. */ int primary_socket; /* Unix socket for primary process. */ void *uar_base; /* Reserved address space for UAR mapping */ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */ @@ -157,47 +157,6 @@ struct priv { /* Context for Verbs allocator. */ }; -/** - * Lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. - */ -static inline void -priv_lock(struct priv *priv) -{ - rte_spinlock_lock(&priv->lock); -} - -/** - * Try to lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. - * - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -static inline int -priv_trylock(struct priv *priv) -{ - return rte_spinlock_trylock(&priv->lock); -} - -/** - * Unlock private structure. - * - * @param priv - * Pointer to private structure. 
- */ -static inline void -priv_unlock(struct priv *priv) -{ - rte_spinlock_unlock(&priv->lock); -} - /* mlx5.c */ int mlx5_getenv_int(const char *); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 5c43755d0..f0defc69d 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -269,18 +269,16 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) } /** - * Ethernet device configuration. - * - * Prepare the driver for a given number of TX and RX queues. + * DPDK callback for Ethernet device configuration. * * @param dev * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ -static int -dev_configure(struct rte_eth_dev *dev) +int +mlx5_dev_configure(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; unsigned int rxqs_n = dev->data->nb_rx_queues; @@ -362,28 +360,7 @@ dev_configure(struct rte_eth_dev *dev) j = 0; } return 0; -} - -/** - * DPDK callback for Ethernet device configuration. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative errno value on failure. - */ -int -mlx5_dev_configure(struct rte_eth_dev *dev) -{ - struct priv *priv = dev->data->dev_private; - int ret; - priv_lock(priv); - ret = dev_configure(dev); - assert(ret >= 0); - priv_unlock(priv); - return -ret; } /** @@ -403,7 +380,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) char ifname[IF_NAMESIZE]; info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - priv_lock(priv); /* FIXME: we should ask the device for these values. 
*/ info->min_rx_bufsize = 32; info->max_rx_pktlen = 65536; @@ -431,7 +407,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->hash_key_size = priv->rss_conf.rss_key_len; info->speed_capa = priv->link_speed_capa; info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; - priv_unlock(priv); } /** @@ -490,7 +465,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) struct rte_eth_link dev_link; int link_speed = 0; - /* priv_lock() is not taken to allow concurrent calls. */ if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; @@ -756,9 +730,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) struct priv *priv = dev->data->dev_private; int ret; - priv_lock(priv); ret = priv_link_update(priv, wait_to_complete); - priv_unlock(priv); return ret; } @@ -780,7 +752,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) uint16_t kern_mtu; int ret = 0; - priv_lock(priv); ret = priv_get_mtu(priv, &kern_mtu); if (ret) goto out; @@ -795,13 +766,11 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) priv->mtu = mtu; DEBUG("adapter port %u MTU set to %u", priv->port, mtu); } - priv_unlock(priv); return 0; out: ret = errno; WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, strerror(ret)); - priv_unlock(priv); assert(ret >= 0); return -ret; } @@ -828,7 +797,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" @@ -847,7 +815,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) fc_conf->mode = RTE_FC_NONE; ret = 0; out: - priv_unlock(priv); assert(ret >= 0); return -ret; } @@ -886,7 +853,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; 
- priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" @@ -896,7 +862,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) } ret = 0; out: - priv_unlock(priv); assert(ret >= 0); return -ret; } @@ -1039,15 +1004,8 @@ mlx5_dev_link_status_handler(void *arg) struct priv *priv = dev->data->dev_private; int ret; - while (!priv_trylock(priv)) { - /* Alarm is being canceled. */ - if (priv->pending_alarm == 0) - return; - rte_pause(); - } priv->pending_alarm = 0; ret = priv_link_status_update(priv); - priv_unlock(priv); if (!ret) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); } @@ -1067,9 +1025,7 @@ mlx5_dev_interrupt_handler(void *cb_arg) struct priv *priv = dev->data->dev_private; uint32_t events; - priv_lock(priv); events = priv_dev_status_handler(priv); - priv_unlock(priv); if (events & (1 << RTE_ETH_EVENT_INTR_LSC)) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); if (events & (1 << RTE_ETH_EVENT_INTR_RMV)) @@ -1088,9 +1044,7 @@ mlx5_dev_handler_socket(void *cb_arg) struct rte_eth_dev *dev = cb_arg; struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_socket_handle(priv); - priv_unlock(priv); } /** @@ -1190,9 +1144,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; int err; - priv_lock(priv); err = priv_dev_set_link(priv, 0); - priv_unlock(priv); return err; } @@ -1211,9 +1163,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; int err; - priv_lock(priv); err = priv_dev_set_link(priv, 1); - priv_unlock(priv); return err; } diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index d8d124749..137c34988 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -1911,9 +1911,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev, int ret; struct mlx5_flow_parse parser = { .create = 0, }; - priv_lock(priv); 
ret = priv_flow_convert(priv, attr, items, actions, error, &parser); - priv_unlock(priv); return ret; } @@ -1933,10 +1931,8 @@ mlx5_flow_create(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; struct rte_flow *flow; - priv_lock(priv); flow = priv_flow_create(priv, &priv->flows, attr, items, actions, error); - priv_unlock(priv); return flow; } @@ -2423,9 +2419,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_flow_destroy(priv, &priv->flows, flow); - priv_unlock(priv); return 0; } @@ -2441,9 +2435,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_flow_flush(priv, &priv->flows); - priv_unlock(priv); return 0; } @@ -2501,16 +2493,14 @@ priv_flow_query_count(struct ibv_counter_set *cs, * @see rte_flow_ops */ int -mlx5_flow_query(struct rte_eth_dev *dev, +mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow, enum rte_flow_action_type action __rte_unused, void *data, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; int res = EINVAL; - priv_lock(priv); if (flow->cs) { res = priv_flow_query_count(flow->cs, &flow->counter_stats, @@ -2522,7 +2512,6 @@ mlx5_flow_query(struct rte_eth_dev *dev, NULL, "no counter found for flow"); } - priv_unlock(priv); return -res; } #endif @@ -2540,13 +2529,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_lock(priv); if (dev->data->dev_started) { rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "port must be stopped first"); - priv_unlock(priv); return -rte_errno; } priv->isolated = !!enable; @@ -2554,7 +2541,6 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, priv->dev->dev_ops = &mlx5_dev_ops_isolate; else priv->dev->dev_ops = &mlx5_dev_ops; - priv_unlock(priv); return 0; } @@ -3036,9 +3022,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = 
&mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - priv_lock(priv); ret = priv_fdir_ctrl_func(priv, filter_op, arg); - priv_unlock(priv); break; default: ERROR("%p: filter type (%d) not supported", diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 6624d0ffa..3b7b6d140 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -164,9 +164,9 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, container_of(txq, struct mlx5_txq_ctrl, txq); struct mlx5_mr *mr; - priv_lock(txq_ctrl->priv); + rte_spinlock_lock(&txq_ctrl->priv->mr_lock); mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx); - priv_unlock(txq_ctrl->priv); + rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return mr; } diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index 8f5c8beff..4b88215fb 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -44,7 +44,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; int ret = 0; - priv_lock(priv); if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { ret = -EINVAL; goto out; @@ -62,7 +61,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev, } priv->rss_conf.rss_hf = rss_conf->rss_hf; out: - priv_unlock(priv); return ret; } @@ -85,7 +83,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, if (!rss_conf) return -EINVAL; - priv_lock(priv); if (rss_conf->rss_key && (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) { memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, @@ -93,7 +90,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, } rss_conf->rss_key_len = priv->rss_conf.rss_key_len; rss_conf->rss_hf = priv->rss_conf.rss_hf; - priv_unlock(priv); return 0; } @@ -222,9 +218,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, int ret; struct priv *priv = dev->data->dev_private; - priv_lock(priv); ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size); - priv_unlock(priv); return -ret; } @@ -249,9 +243,7 @@ 
mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, int ret; struct priv *priv = dev->data->dev_private; - priv_lock(priv); ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size); - priv_unlock(priv); if (dev->data->dev_started) { mlx5_dev_stop(dev); mlx5_dev_start(dev); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 320a12be9..de3335cb9 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -286,7 +286,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, container_of(rxq, struct mlx5_rxq_ctrl, rxq); int ret = 0; - priv_lock(priv); if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); WARN("%p: increased number of descriptors in RX queue %u" @@ -298,7 +297,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->rxqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->rxqs_n); - priv_unlock(priv); return -EOVERFLOW; } if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) { @@ -329,7 +327,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, (void *)rxq_ctrl); (*priv->rxqs)[idx] = &rxq_ctrl->rxq; out: - priv_unlock(priv); return -ret; } @@ -350,12 +347,10 @@ mlx5_rx_queue_release(void *dpdk_rxq) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - priv_lock(priv); if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) rte_panic("Rx queue %p is still used by a flow and cannot be" " removed\n", (void *)rxq_ctrl); mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); - priv_unlock(priv); } /** @@ -512,7 +507,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { ret = EINVAL; @@ -531,7 +525,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) mlx5_priv_rxq_ibv_release(priv, 
rxq_ibv); } exit: - priv_unlock(priv); if (ret) WARN("unable to arm interrupt on rx queue %d", rx_queue_id); return -ret; @@ -559,7 +552,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) void *ev_ctx; int ret = 0; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { ret = EINVAL; @@ -583,7 +575,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) exit: if (rxq_ibv) mlx5_priv_rxq_ibv_release(priv, rxq_ibv); - priv_unlock(priv); if (ret) WARN("unable to disable interrupt on rx queue %d", rx_queue_id); diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 167e40548..39be1865a 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -328,7 +328,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) unsigned int i; unsigned int idx; - priv_lock(priv); /* Add software counters. */ for (i = 0; (i != priv->rxqs_n); ++i) { struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; @@ -374,7 +373,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* FIXME: retrieve and add hardware counters. */ #endif *stats = tmp; - priv_unlock(priv); return 0; } @@ -391,7 +389,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) unsigned int i; unsigned int idx; - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); ++i) { if ((*priv->rxqs)[i] == NULL) continue; @@ -409,7 +406,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: reset hardware counters. 
*/ #endif - priv_unlock(priv); } /** @@ -436,16 +432,13 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; - priv_lock(priv); stats_n = priv_ethtool_get_stats_n(priv); if (stats_n < 0) { - priv_unlock(priv); return -1; } if (xstats_ctrl->stats_n != stats_n) priv_xstats_init(priv); ret = priv_xstats_get(priv, stats); - priv_unlock(priv); } return ret; } @@ -463,15 +456,12 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; - priv_lock(priv); stats_n = priv_ethtool_get_stats_n(priv); if (stats_n < 0) - goto unlock; + return; if (xstats_ctrl->stats_n != stats_n) priv_xstats_init(priv); priv_xstats_reset(priv); -unlock: - priv_unlock(priv); } /** @@ -491,18 +481,15 @@ int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, struct rte_eth_xstat_name *xstats_names, unsigned int n) { - struct priv *priv = dev->data->dev_private; unsigned int i; if (n >= xstats_n && xstats_names) { - priv_lock(priv); for (i = 0; i != xstats_n; ++i) { strncpy(xstats_names[i].name, mlx5_counters_init[i].dpdk_name, RTE_ETH_XSTATS_NAME_SIZE); xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; } - priv_unlock(priv); } return xstats_n; } diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index b147fb4f8..3ce93910d 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -142,7 +142,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) int err; dev->data->dev_started = 1; - priv_lock(priv); err = priv_flow_create_drop_queue(priv); if (err) { ERROR("%p: Drop queue allocation failed: %s", @@ -180,7 +179,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) goto error; } priv_dev_interrupt_handler_install(priv, dev); - priv_unlock(priv); return 0; error: /* Rollback. 
*/ @@ -192,7 +190,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) priv_txq_stop(priv); priv_rxq_stop(priv); priv_flow_delete_drop_queue(priv); - priv_unlock(priv); return err; } @@ -210,7 +207,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; - priv_lock(priv); dev->data->dev_started = 0; /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; @@ -227,7 +223,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev) for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) priv_mr_release(priv, mr); priv_flow_delete_drop_queue(priv); - priv_unlock(priv); } /** @@ -412,8 +407,6 @@ mlx5_traffic_restart(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - priv_lock(priv); priv_dev_traffic_restart(priv, dev); - priv_unlock(priv); return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 9be707840..47ee95990 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -172,7 +172,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, container_of(txq, struct mlx5_txq_ctrl, txq); int ret = 0; - priv_lock(priv); /* * Don't verify port offloads for application which * use the old API. 
@@ -205,7 +204,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->txqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->txqs_n); - priv_unlock(priv); return -EOVERFLOW; } if (!mlx5_priv_txq_releasable(priv, idx)) { @@ -226,7 +224,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, (void *)txq_ctrl); (*priv->txqs)[idx] = &txq_ctrl->txq; out: - priv_unlock(priv); return -ret; } @@ -248,7 +245,6 @@ mlx5_tx_queue_release(void *dpdk_txq) return; txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); priv = txq_ctrl->priv; - priv_lock(priv); for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { DEBUG("%p: removing TX queue %p from list", @@ -256,7 +252,6 @@ mlx5_tx_queue_release(void *dpdk_txq) mlx5_priv_txq_release(priv, i); break; } - priv_unlock(priv); } diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 85ed546cb..184ae2f4e 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -46,7 +46,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) unsigned int i; int ret = 0; - priv_lock(priv); DEBUG("%p: %s VLAN filter ID %" PRIu16, (void *)dev, (on ? "enable" : "disable"), vlan_id); assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter)); @@ -82,7 +81,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) if (dev->data->dev_started) priv_dev_traffic_restart(priv, dev); out: - priv_unlock(priv); return ret; } @@ -155,9 +153,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ERROR("VLAN stripping, invalid queue number %d", queue); return; } - priv_lock(priv); priv_vlan_strip_queue_set(priv, queue, on); - priv_unlock(priv); } /** @@ -183,10 +179,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) return 0; } /* Run on every RX queue and set/reset VLAN stripping. 
*/ - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); i++) priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); - priv_unlock(priv); } return 0; } -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 08/10] net/mlx5: prefix all function with mlx5 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (6 preceding siblings ...) 2018-03-05 12:21 ` [PATCH v3 07/10] net/mlx5: remove control path locks Nelio Laranjeiro @ 2018-03-05 12:21 ` Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 09/10] net/mlx5: change non failing function return values Nelio Laranjeiro ` (2 subsequent siblings) 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:21 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh This change removes the need to distinguish unlocked priv_*() functions which are therefore renamed using a mlx5_*() prefix for consistency. At the same time, all functions from mlx5 uses a pointer to the ETH device instead of the one to the PMD private data. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 104 ++++++------ drivers/net/mlx5/mlx5.h | 90 +++++------ drivers/net/mlx5/mlx5_ethdev.c | 288 ++++++++++++++------------------- drivers/net/mlx5/mlx5_flow.c | 342 +++++++++++++++++++-------------------- drivers/net/mlx5/mlx5_mac.c | 8 +- drivers/net/mlx5/mlx5_mr.c | 86 ++++------ drivers/net/mlx5/mlx5_rss.c | 107 ++++-------- drivers/net/mlx5/mlx5_rxq.c | 285 ++++++++++++++++---------------- drivers/net/mlx5/mlx5_rxtx.c | 10 +- drivers/net/mlx5/mlx5_rxtx.h | 115 +++++++------ drivers/net/mlx5/mlx5_rxtx_vec.c | 25 ++- drivers/net/mlx5/mlx5_socket.c | 29 ++-- drivers/net/mlx5/mlx5_stats.c | 135 ++++++---------- drivers/net/mlx5/mlx5_trigger.c | 118 ++++++-------- drivers/net/mlx5/mlx5_txq.c | 134 +++++++-------- drivers/net/mlx5/mlx5_vlan.c | 61 +++---- 16 files changed, 868 insertions(+), 1069 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 872edab9d..b6211e9c1 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -169,8 +169,8 @@ 
mlx5_dev_close(struct rte_eth_dev *dev) (void *)dev, ((priv->ctx != NULL) ? priv->ctx->device->name : "")); /* In case mlx5_dev_stop() has not been called. */ - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_dev_traffic_disable(priv, dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_traffic_disable(dev); /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; @@ -178,7 +178,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_rx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->rxqs_n); ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); priv->rxqs_n = 0; priv->rxqs = NULL; } @@ -186,7 +186,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_tx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->txqs_n); ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); priv->txqs_n = 0; priv->txqs = NULL; } @@ -201,31 +201,31 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (priv->reta_idx != NULL) rte_free(priv->reta_idx); if (priv->primary_socket) - priv_socket_uninit(priv); - ret = mlx5_priv_hrxq_ibv_verify(priv); + mlx5_socket_uninit(dev); + ret = mlx5_hrxq_ibv_verify(dev); if (ret) - WARN("%p: some Hash Rx queue still remain", (void *)priv); - ret = mlx5_priv_ind_table_ibv_verify(priv); + WARN("%p: some Hash Rx queue still remain", (void *)dev); + ret = mlx5_ind_table_ibv_verify(dev); if (ret) - WARN("%p: some Indirection table still remain", (void *)priv); - ret = mlx5_priv_rxq_ibv_verify(priv); + WARN("%p: some Indirection table still remain", (void *)dev); + ret = mlx5_rxq_ibv_verify(dev); if (ret) - WARN("%p: some Verbs Rx queue still remain", (void *)priv); - ret = mlx5_priv_rxq_verify(priv); + WARN("%p: some Verbs Rx queue still remain", (void *)dev); + ret = mlx5_rxq_verify(dev); if (ret) - WARN("%p: some Rx Queues still remain", (void *)priv); - ret = 
mlx5_priv_txq_ibv_verify(priv); + WARN("%p: some Rx Queues still remain", (void *)dev); + ret = mlx5_txq_ibv_verify(dev); if (ret) - WARN("%p: some Verbs Tx queue still remain", (void *)priv); - ret = mlx5_priv_txq_verify(priv); + WARN("%p: some Verbs Tx queue still remain", (void *)dev); + ret = mlx5_txq_verify(dev); if (ret) - WARN("%p: some Tx Queues still remain", (void *)priv); - ret = priv_flow_verify(priv); + WARN("%p: some Tx Queues still remain", (void *)dev); + ret = mlx5_flow_verify(dev); if (ret) - WARN("%p: some flows still remain", (void *)priv); - ret = priv_mr_verify(priv); + WARN("%p: some flows still remain", (void *)dev); + ret = mlx5_mr_verify(dev); if (ret) - WARN("%p: some Memory Region still remain", (void *)priv); + WARN("%p: some Memory Region still remain", (void *)dev); memset(priv, 0, sizeof(*priv)); } @@ -466,15 +466,16 @@ static void *uar_base; /** * Reserve UAR address space for primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. */ static int -priv_uar_init_primary(struct priv *priv) +mlx5_uar_init_primary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr = (void *)0; int i; const struct rte_mem_config *mcfg; @@ -516,15 +517,16 @@ priv_uar_init_primary(struct priv *priv) * Reserve UAR address space for secondary process, align with * primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. 
*/ static int -priv_uar_init_secondary(struct priv *priv) +mlx5_uar_init_secondary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr; int ret; @@ -690,7 +692,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct ibv_port_attr port_attr; struct ibv_pd *pd = NULL; struct priv *priv = NULL; - struct rte_eth_dev *eth_dev; + struct rte_eth_dev *eth_dev = NULL; struct ibv_device_attr_ex device_attr_ex; struct ether_addr mac; struct ibv_device_attr_ex device_attr; @@ -721,20 +723,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } eth_dev->device = &pci_dev->device; eth_dev->dev_ops = &mlx5_dev_sec_ops; - priv = eth_dev->data->dev_private; - err = priv_uar_init_secondary(priv); + err = mlx5_uar_init_secondary(eth_dev); if (err < 0) { err = -err; goto error; } /* Receive command fd from primary process */ - err = priv_socket_connect(priv); + err = mlx5_socket_connect(eth_dev); if (err < 0) { err = -err; goto error; } /* Remap UAR for Tx queues. */ - err = priv_tx_uar_remap(priv, err); + err = mlx5_tx_uar_remap(eth_dev, err); if (err) goto error; /* @@ -743,9 +744,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, * secondary process. 
*/ eth_dev->rx_pkt_burst = - priv_select_rx_function(priv, eth_dev); + mlx5_select_rx_function(eth_dev); eth_dev->tx_pkt_burst = - priv_select_tx_function(priv, eth_dev); + mlx5_select_tx_function(eth_dev); continue; } DEBUG("using port %u (%08" PRIx32 ")", port, test); @@ -859,11 +860,23 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, WARN("Rx CQE compression isn't supported"); config.cqe_comp = 0; } - err = priv_uar_init_primary(priv); + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + ERROR("can not allocate rte ethdev"); + err = ENOMEM; + goto port_error; + } + eth_dev->data->dev_private = priv; + priv->dev = eth_dev; + eth_dev->data->mac_addrs = priv->mac; + eth_dev->device = &pci_dev->device; + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->device->driver = &mlx5_driver.driver; + err = mlx5_uar_init_primary(eth_dev); if (err) goto port_error; /* Configure the first MAC address by default. */ - if (priv_get_mac(priv, &mac.addr_bytes)) { + if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { ERROR("cannot get MAC address, is mlx5_en loaded?" " (errno: %s)", strerror(errno)); err = ENODEV; @@ -878,7 +891,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, { char ifname[IF_NAMESIZE]; - if (priv_get_ifname(priv, &ifname) == 0) + if (mlx5_get_ifname(eth_dev, &ifname) == 0) DEBUG("port %u ifname is \"%s\"", priv->port, ifname); else @@ -886,25 +899,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } #endif /* Get actual MTU if possible. 
*/ - priv_get_mtu(priv, &priv->mtu); + mlx5_get_mtu(eth_dev, &priv->mtu); DEBUG("port %u MTU is %u", priv->port, priv->mtu); - eth_dev = rte_eth_dev_allocate(name); - if (eth_dev == NULL) { - ERROR("can not allocate rte ethdev"); - err = ENOMEM; - goto port_error; - } - eth_dev->data->dev_private = priv; - eth_dev->data->mac_addrs = priv->mac; - eth_dev->device = &pci_dev->device; - rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->device->driver = &mlx5_driver.driver; /* * Initialize burst functions to prevent crashes before link-up. */ eth_dev->rx_pkt_burst = removed_rx_burst; eth_dev->tx_pkt_burst = removed_tx_burst; - priv->dev = eth_dev; eth_dev->dev_ops = &mlx5_dev_ops; /* Register MAC address. */ claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); @@ -919,10 +920,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, (void *)((uintptr_t)&alctr)); - /* Bring Ethernet device up. */ DEBUG("forcing Ethernet interface up"); - priv_set_flags(priv, ~IFF_UP, IFF_UP); + mlx5_set_flags(eth_dev, ~IFF_UP, IFF_UP); /* Store device configuration on private structure. 
*/ priv->config = config; continue; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 8e021544c..2cb463b62 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -163,18 +163,16 @@ int mlx5_getenv_int(const char *); /* mlx5_ethdev.c */ -struct priv *mlx5_get_priv(struct rte_eth_dev *dev); -int mlx5_is_secondary(void); -int priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]); -int priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr); -int priv_get_mtu(struct priv *priv, uint16_t *mtu); -int priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags); +int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]); +int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr); +int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu); +int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, + unsigned int flags); int mlx5_dev_configure(struct rte_eth_dev *dev); void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info); const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev); -int priv_link_update(struct priv *priv, int wait_to_complete); -int priv_force_link_status_change(struct priv *priv, int status); int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status); int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); @@ -183,22 +181,18 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, struct rte_pci_addr *pci_addr); void mlx5_dev_link_status_handler(void *arg); -void mlx5_dev_interrupt_handler(void *cb_arg); -void priv_dev_interrupt_handler_uninstall(struct priv *priv, - struct rte_eth_dev *dev); -void priv_dev_interrupt_handler_install(struct priv *priv, - struct rte_eth_dev 
*dev); +void mlx5_dev_interrupt_handler(void *arg); +void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev); +void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev); int mlx5_set_link_down(struct rte_eth_dev *dev); int mlx5_set_link_up(struct rte_eth_dev *dev); -eth_tx_burst_t priv_select_tx_function(struct priv *priv, - struct rte_eth_dev *dev); -eth_rx_burst_t priv_select_rx_function(struct priv *priv, - struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); +eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev); +eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev); /* mlx5_mac.c */ -int priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]); +int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]); void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, uint32_t index, uint32_t vmdq); @@ -210,7 +204,7 @@ int mlx5_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); -int priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size); +int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size); int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); @@ -227,13 +221,13 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *dev); /* mlx5_stats.c */ -void priv_xstats_init(struct priv *priv); +void mlx5_xstats_init(struct rte_eth_dev *dev); int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); void mlx5_stats_reset(struct rte_eth_dev *dev); -int mlx5_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstat *stats, unsigned int n); +int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n); void mlx5_xstats_reset(struct rte_eth_dev *dev); -int 
mlx5_xstats_get_names(struct rte_eth_dev *dev, +int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, struct rte_eth_xstat_name *xstats_names, unsigned int n); @@ -247,9 +241,8 @@ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); int mlx5_dev_start(struct rte_eth_dev *dev); void mlx5_dev_stop(struct rte_eth_dev *dev); -int priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev); -int priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev); -int priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev); +int mlx5_traffic_enable(struct rte_eth_dev *dev); +int mlx5_traffic_disable(struct rte_eth_dev *dev); int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ @@ -259,21 +252,6 @@ int mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_item items[], const struct rte_flow_action actions[], struct rte_flow_error *error); -void priv_flow_flush(struct priv *priv, struct mlx5_flows *list); -int priv_flow_create_drop_queue(struct priv *priv); -void priv_flow_stop(struct priv *priv, struct mlx5_flows *list); -int priv_flow_start(struct priv *priv, struct mlx5_flows *list); -int priv_flow_verify(struct priv *priv); -int priv_flow_create_drop_queue(struct priv *priv); -void priv_flow_delete_drop_queue(struct priv *priv); -int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, - struct rte_flow_item_eth *eth_spec, - struct rte_flow_item_eth *eth_mask, - struct rte_flow_item_vlan *vlan_spec, - struct rte_flow_item_vlan *vlan_mask); -int mlx5_ctrl_flow(struct rte_eth_dev *dev, - struct rte_flow_item_eth *eth_spec, - struct rte_flow_item_eth *eth_mask); struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], @@ -281,6 +259,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, struct rte_flow_error *error); int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error); +void 
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list); int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, enum rte_flow_action_type action, void *data, @@ -291,19 +270,32 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg); +int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list); +void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list); +int mlx5_flow_verify(struct rte_eth_dev *dev); +int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask); +int mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask); +int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev); +void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev); /* mlx5_socket.c */ -int priv_socket_init(struct priv *priv); -int priv_socket_uninit(struct priv *priv); -void priv_socket_handle(struct priv *priv); -int priv_socket_connect(struct priv *priv); +int mlx5_socket_init(struct rte_eth_dev *priv); +int mlx5_socket_uninit(struct rte_eth_dev *priv); +void mlx5_socket_handle(struct rte_eth_dev *priv); +int mlx5_socket_connect(struct rte_eth_dev *priv); /* mlx5_mr.c */ -struct mlx5_mr *priv_mr_new(struct priv *priv, struct rte_mempool *mp); -struct mlx5_mr *priv_mr_get(struct priv *priv, struct rte_mempool *mp); -int priv_mr_release(struct priv *priv, struct mlx5_mr *mr); -int priv_mr_verify(struct priv *priv); +struct mlx5_mr *mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp); +struct mlx5_mr *mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp); +int mlx5_mr_release(struct mlx5_mr *mr); +int mlx5_mr_verify(struct rte_eth_dev *dev); #endif /* RTE_PMD_MLX5_H_ */ diff --git 
a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index f0defc69d..1fde3d842 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -95,8 +95,8 @@ struct ethtool_link_settings { /** * Get interface name from private structure. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] ifname * Interface name output buffer. * @@ -104,8 +104,9 @@ struct ethtool_link_settings { * 0 on success, -1 on failure and errno is set. */ int -priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) +mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) { + struct priv *priv = dev->data->dev_private; DIR *dir; struct dirent *dent; unsigned int dev_type = 0; @@ -176,8 +177,8 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) /** * Perform ifreq ioctl() on associated Ethernet device. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param req * Request number to pass to ioctl(). * @param[out] ifr @@ -187,14 +188,14 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) * 0 on success, -1 on failure and errno is set. */ int -priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) +mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) { int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); int ret = -1; if (sock == -1) return ret; - if (priv_get_ifname(priv, &ifr->ifr_name) == 0) + if (mlx5_get_ifname(dev, &ifr->ifr_name) == 0) ret = ioctl(sock, req, ifr); close(sock); return ret; @@ -203,8 +204,8 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) /** * Get device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] mtu * MTU value output buffer. 
* @@ -212,10 +213,10 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) * 0 on success, -1 on failure and errno is set. */ int -priv_get_mtu(struct priv *priv, uint16_t *mtu) +mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) { struct ifreq request; - int ret = priv_ifreq(priv, SIOCGIFMTU, &request); + int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request); if (ret) return ret; @@ -226,8 +227,8 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu) /** * Set device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param mtu * MTU value to set. * @@ -235,18 +236,18 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu) * 0 on success, -1 on failure and errno is set. */ static int -priv_set_mtu(struct priv *priv, uint16_t mtu) +mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct ifreq request = { .ifr_mtu = mtu, }; - return priv_ifreq(priv, SIOCSIFMTU, &request); + return mlx5_ifreq(dev, SIOCSIFMTU, &request); } /** * Set device flags. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param keep * Bitmask for flags that must remain untouched. * @param flags @@ -256,16 +257,16 @@ priv_set_mtu(struct priv *priv, uint16_t mtu) * 0 on success, -1 on failure and errno is set. 
*/ int -priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) +mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) { struct ifreq request; - int ret = priv_ifreq(priv, SIOCGIFFLAGS, &request); + int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request); if (ret) return ret; request.ifr_flags &= keep; request.ifr_flags |= flags & ~keep; - return priv_ifreq(priv, SIOCSIFFLAGS, &request); + return mlx5_ifreq(dev, SIOCSIFFLAGS, &request); } /** @@ -288,11 +289,11 @@ mlx5_dev_configure(struct rte_eth_dev *dev) unsigned int reta_idx_n; const uint8_t use_app_rss_key = !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; - uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv); + uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev); uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; uint64_t supp_rx_offloads = - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv)); + (mlx5_get_rx_port_offloads() | + mlx5_get_rx_queue_offloads(dev)); uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; if ((tx_offloads & supp_tx_offloads) != tx_offloads) { @@ -349,7 +350,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev) reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? 
priv->config.ind_table_max_size : rxqs_n)); - if (priv_rss_reta_index_resize(priv, reta_idx_n)) + if (mlx5_rss_reta_index_resize(dev, reta_idx_n)) return ENOMEM; /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread @@ -395,12 +396,11 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->max_rx_queues = max; info->max_tx_queues = max; info->max_mac_addrs = RTE_DIM(priv->mac); - info->rx_queue_offload_capa = - mlx5_priv_get_rx_queue_offloads(priv); - info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) | + info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev); + info->rx_offload_capa = (mlx5_get_rx_port_offloads() | info->rx_queue_offload_capa); - info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv); - if (priv_get_ifname(priv, &ifname) == 0) + info->tx_offload_capa = mlx5_get_tx_port_offloads(dev); + if (mlx5_get_ifname(dev, &ifname) == 0) info->if_index = if_nametoindex(ifname); info->reta_size = priv->reta_idx_n ? 
priv->reta_idx_n : config->ind_table_max_size; @@ -465,7 +465,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) struct rte_eth_link dev_link; int link_speed = 0; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { + if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; } @@ -473,7 +473,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&edata; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", strerror(errno)); return -1; @@ -527,7 +527,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) struct rte_eth_link dev_link; uint64_t sc; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { + if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); return -1; } @@ -535,7 +535,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&gcmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", strerror(errno)); return -1; @@ -549,7 +549,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) *ecmd = gcmd; ifr.ifr_data = (void *)ecmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", strerror(errno)); return -1; @@ -608,90 +608,50 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) /** * Enable receiving and transmitting traffic. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ static void -priv_link_start(struct priv *priv) +mlx5_link_start(struct rte_eth_dev *dev) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; int err; - dev->tx_pkt_burst = priv_select_tx_function(priv, dev); - dev->rx_pkt_burst = priv_select_rx_function(priv, dev); - err = priv_dev_traffic_enable(priv, dev); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + err = mlx5_traffic_enable(dev); if (err) ERROR("%p: error occurred while configuring control flows: %s", - (void *)priv, strerror(err)); - err = priv_flow_start(priv, &priv->flows); + (void *)dev, strerror(err)); + err = mlx5_flow_start(dev, &priv->flows); if (err) ERROR("%p: error occurred while configuring flows: %s", - (void *)priv, strerror(err)); + (void *)dev, strerror(err)); } /** * Disable receiving and transmitting traffic. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. */ static void -priv_link_stop(struct priv *priv) +mlx5_link_stop(struct rte_eth_dev *dev) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; } /** - * Retrieve physical link information and update rx/tx_pkt_burst callbacks - * accordingly. - * - * @param priv - * Pointer to private structure. - * @param wait_to_complete - * Wait for request completion (ignored). 
- */ -int -priv_link_update(struct priv *priv, int wait_to_complete __rte_unused) -{ - struct rte_eth_dev *dev = priv->dev; - struct utsname utsname; - int ver[3]; - int ret; - struct rte_eth_link dev_link = dev->data->dev_link; - - if (uname(&utsname) == -1 || - sscanf(utsname.release, "%d.%d.%d", - &ver[0], &ver[1], &ver[2]) != 3 || - KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) - ret = mlx5_link_update_unlocked_gset(dev); - else - ret = mlx5_link_update_unlocked_gs(dev); - /* If lsc interrupt is disabled, should always be ready for traffic. */ - if (!dev->data->dev_conf.intr_conf.lsc) { - priv_link_start(priv); - return ret; - } - /* Re-select burst callbacks only if link status has been changed. */ - if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { - if (dev->data->dev_link.link_status == ETH_LINK_UP) - priv_link_start(priv); - else - priv_link_stop(priv); - } - return ret; -} - -/** * Querying the link status till it changes to the desired state. * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param status * Link desired status. * @@ -699,13 +659,13 @@ priv_link_update(struct priv *priv, int wait_to_complete __rte_unused) * 0 on success, negative errno value on failure. 
*/ int -priv_force_link_status_change(struct priv *priv, int status) +mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) { int try = 0; while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) { - priv_link_update(priv, 0); - if (priv->dev->data->dev_link.link_status == status) + mlx5_link_update(dev, 0); + if (dev->data->dev_link.link_status == status) return 0; try++; sleep(1); @@ -727,10 +687,30 @@ priv_force_link_status_change(struct priv *priv, int status) int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct utsname utsname; + int ver[3]; int ret; + struct rte_eth_link dev_link = dev->data->dev_link; - ret = priv_link_update(priv, wait_to_complete); + if (uname(&utsname) == -1 || + sscanf(utsname.release, "%d.%d.%d", + &ver[0], &ver[1], &ver[2]) != 3 || + KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) + ret = mlx5_link_update_unlocked_gset(dev); + else + ret = mlx5_link_update_unlocked_gs(dev); + /* If lsc interrupt is disabled, should always be ready for traffic. */ + if (!dev->data->dev_conf.intr_conf.lsc) { + mlx5_link_start(dev); + return ret; + } + /* Re-select burst callbacks only if link status has been changed. */ + if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { + if (dev->data->dev_link.link_status == ETH_LINK_UP) + mlx5_link_start(dev); + else + mlx5_link_stop(dev); + } return ret; } @@ -752,14 +732,14 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) uint16_t kern_mtu; int ret = 0; - ret = priv_get_mtu(priv, &kern_mtu); + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) goto out; /* Set kernel interface MTU first. 
*/ - ret = priv_set_mtu(priv, mtu); + ret = mlx5_set_mtu(dev, mtu); if (ret) goto out; - ret = priv_get_mtu(priv, &kern_mtu); + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) goto out; if (kern_mtu == mtu) { @@ -789,7 +769,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_GPAUSEPARAM @@ -797,10 +776,9 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { ret = errno; - WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" - " failed: %s", + WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s", strerror(ret)); goto out; } @@ -833,7 +811,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_SPAUSEPARAM @@ -853,7 +830,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { ret = errno; WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" " failed: %s", @@ -919,18 +896,19 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, /** * Update the link status. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Zero if the callback process can be called immediately. 
*/ static int -priv_link_status_update(struct priv *priv) +mlx5_link_status_update(struct rte_eth_dev *dev) { - struct rte_eth_link *link = &priv->dev->data->dev_link; + struct priv *priv = dev->data->dev_private; + struct rte_eth_link *link = &dev->data->dev_link; - priv_link_update(priv, 0); + mlx5_link_update(dev, 0); if (((link->link_speed == 0) && link->link_status) || ((link->link_speed != 0) && !link->link_status)) { /* @@ -955,8 +933,8 @@ priv_link_status_update(struct priv *priv) /** * Device status handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param events * Pointer to event flags holder. * @@ -964,8 +942,9 @@ priv_link_status_update(struct priv *priv) * Events bitmap of callback process which can be called immediately. */ static uint32_t -priv_dev_status_handler(struct priv *priv) +mlx5_dev_status_handler(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct ibv_async_event event; uint32_t ret = 0; @@ -975,10 +954,10 @@ priv_dev_status_handler(struct priv *priv) break; if ((event.event_type == IBV_EVENT_PORT_ACTIVE || event.event_type == IBV_EVENT_PORT_ERR) && - (priv->dev->data->dev_conf.intr_conf.lsc == 1)) + (dev->data->dev_conf.intr_conf.lsc == 1)) ret |= (1 << RTE_ETH_EVENT_INTR_LSC); else if (event.event_type == IBV_EVENT_DEVICE_FATAL && - priv->dev->data->dev_conf.intr_conf.rmv == 1) + dev->data->dev_conf.intr_conf.rmv == 1) ret |= (1 << RTE_ETH_EVENT_INTR_RMV); else DEBUG("event type %d on port %d not handled", @@ -986,7 +965,7 @@ priv_dev_status_handler(struct priv *priv) mlx5_glue->ack_async_event(&event); } if (ret & (1 << RTE_ETH_EVENT_INTR_LSC)) - if (priv_link_status_update(priv)) + if (mlx5_link_status_update(dev)) ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC); return ret; } @@ -1005,7 +984,7 @@ mlx5_dev_link_status_handler(void *arg) int ret; priv->pending_alarm = 0; - ret = priv_link_status_update(priv); + ret = mlx5_link_status_update(dev); if (!ret) 
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); } @@ -1022,10 +1001,9 @@ void mlx5_dev_interrupt_handler(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; uint32_t events; - events = priv_dev_status_handler(priv); + events = mlx5_dev_status_handler(dev); if (events & (1 << RTE_ETH_EVENT_INTR_LSC)) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); if (events & (1 << RTE_ETH_EVENT_INTR_RMV)) @@ -1042,22 +1020,21 @@ static void mlx5_dev_handler_socket(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; - priv_socket_handle(priv); + mlx5_socket_handle(dev); } /** * Uninstall interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. */ void -priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + if (dev->data->dev_conf.intr_conf.lsc || dev->data->dev_conf.intr_conf.rmv) rte_intr_callback_unregister(&priv->intr_handle, @@ -1078,14 +1055,13 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) /** * Install interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. 
*/ void -priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int rc, flags; assert(priv->ctx->async_fd > 0); @@ -1103,7 +1079,7 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - rc = priv_socket_init(priv); + rc = mlx5_socket_init(dev); if (!rc && priv->primary_socket) { priv->intr_handle_socket.fd = priv->primary_socket; priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; @@ -1113,23 +1089,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) } /** - * Change the link state (UP / DOWN). - * - * @param priv - * Pointer to private data structure. - * @param up - * Nonzero for link up, otherwise link down. - * - * @return - * 0 on success, errno value on failure. - */ -static int -priv_dev_set_link(struct priv *priv, int up) -{ - return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP); -} - -/** * DPDK callback to bring the link DOWN. * * @param dev @@ -1141,11 +1100,7 @@ priv_dev_set_link(struct priv *priv, int up) int mlx5_set_link_down(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - int err; - - err = priv_dev_set_link(priv, 0); - return err; + return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP); } /** @@ -1160,27 +1115,22 @@ mlx5_set_link_down(struct rte_eth_dev *dev) int mlx5_set_link_up(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - int err; - - err = priv_dev_set_link(priv, 1); - return err; + return mlx5_set_flags(dev, ~IFF_UP, IFF_UP); } /** * Configure the TX function to use. * - * @param priv - * Pointer to private data structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to private data structure. * * @return * Pointer to selected Tx burst function. 
*/ eth_tx_burst_t -priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) +mlx5_select_tx_function(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst; struct mlx5_dev_config *config = &priv->config; uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; @@ -1194,8 +1144,8 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) if (vlan_insert || tso) return tx_pkt_burst; if (config->mps == MLX5_MPW_ENHANCED) { - if (priv_check_vec_tx_support(priv, dev) > 0) { - if (priv_check_raw_vec_tx_support(priv, dev) > 0) + if (mlx5_check_vec_tx_support(dev) > 0) { + if (mlx5_check_raw_vec_tx_support(dev) > 0) tx_pkt_burst = mlx5_tx_burst_raw_vec; else tx_pkt_burst = mlx5_tx_burst_vec; @@ -1217,21 +1167,19 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev) /** * Configure the RX function to use. * - * @param priv - * Pointer to private data structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to private data structure. * * @return * Pointer to selected Rx burst function. 
*/ eth_rx_burst_t -priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev) +mlx5_select_rx_function(struct rte_eth_dev *dev) { eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; - assert(priv != NULL); - if (priv_check_vec_rx_support(priv) > 0) { + assert(dev != NULL); + if (mlx5_check_vec_rx_support(dev) > 0) { rx_pkt_burst = mlx5_rx_burst_vec; DEBUG("selected RX vectorized function"); } diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 137c34988..6b53b3ea5 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -89,7 +89,7 @@ static int mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id); static int -mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser); +mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser); /* Hash RX queue types. */ enum hash_rxq_type { @@ -515,8 +515,6 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * Copy the RSS configuration from the user ones, of the rss_conf is null, * uses the driver one. * - * @param priv - * Pointer to private structure. * @param parser * Internal parser structure. * @param rss_conf @@ -526,13 +524,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * 0 on success, errno value on failure. */ static int -priv_flow_convert_rss_conf(struct priv *priv __rte_unused, - struct mlx5_flow_parse *parser, +mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser, const struct rte_eth_rss_conf *rss_conf) { /* * This function is also called at the beginning of - * priv_flow_convert_actions() to initialize the parser with the + * mlx5_flow_convert_actions() to initialize the parser with the * device default RSS configuration. */ if (rss_conf) { @@ -554,23 +551,17 @@ priv_flow_convert_rss_conf(struct priv *priv __rte_unused, /** * Extract attribute to the parser. * - * @param priv - * Pointer to private structure. * @param[in] attr * Flow rule attributes. 
* @param[out] error * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_attributes(struct priv *priv __rte_unused, - const struct rte_flow_attr *attr, - struct rte_flow_error *error, - struct mlx5_flow_parse *parser __rte_unused) +mlx5_flow_convert_attributes(const struct rte_flow_attr *attr, + struct rte_flow_error *error) { if (attr->group) { rte_flow_error_set(error, ENOTSUP, @@ -606,8 +597,8 @@ priv_flow_convert_attributes(struct priv *priv __rte_unused, /** * Extract actions request to the parser. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] error @@ -619,16 +610,18 @@ priv_flow_convert_attributes(struct priv *priv __rte_unused, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_actions(struct priv *priv, +mlx5_flow_convert_actions(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { + struct priv *priv = dev->data->dev_private; + /* * Add default RSS configuration necessary for Verbs to create QP even * if no RSS is necessary. 
*/ - priv_flow_convert_rss_conf(priv, parser, + mlx5_flow_convert_rss_conf(parser, (const struct rte_eth_rss_conf *) &priv->rss_conf); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { @@ -708,8 +701,7 @@ priv_flow_convert_actions(struct priv *priv, for (n = 0; n < rss->num; ++n) parser->queues[n] = rss->queue[n]; parser->queues_n = rss->num; - if (priv_flow_convert_rss_conf(priv, parser, - rss->rss_conf)) { + if (mlx5_flow_convert_rss_conf(parser, rss->rss_conf)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, @@ -763,8 +755,6 @@ priv_flow_convert_actions(struct priv *priv, /** * Validate items. * - * @param priv - * Pointer to private structure. * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[out] error @@ -776,8 +766,7 @@ priv_flow_convert_actions(struct priv *priv, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_items_validate(struct priv *priv __rte_unused, - const struct rte_flow_item items[], +mlx5_flow_convert_items_validate(const struct rte_flow_item items[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { @@ -854,8 +843,6 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused, /** * Allocate memory space to store verbs flow attributes. * - * @param priv - * Pointer to private structure. * @param[in] priority * Flow priority. * @param[in] size @@ -867,8 +854,7 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused, * A verbs flow attribute on success, NULL otherwise. */ static struct ibv_flow_attr * -priv_flow_convert_allocate(struct priv *priv __rte_unused, - unsigned int priority, +mlx5_flow_convert_allocate(unsigned int priority, unsigned int size, struct rte_flow_error *error) { @@ -889,14 +875,11 @@ priv_flow_convert_allocate(struct priv *priv __rte_unused, /** * Finalise verbs flow attributes. * - * @param priv - * Pointer to private structure. 
* @param[in, out] parser * Internal parser structure. */ static void -priv_flow_convert_finalise(struct priv *priv __rte_unused, - struct mlx5_flow_parse *parser) +mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser) { const unsigned int ipv4 = hash_rxq_init[parser->layer].ip_version == MLX5_IPV4; @@ -1014,8 +997,8 @@ priv_flow_convert_finalise(struct priv *priv __rte_unused, /** * Validate and convert a flow supported by the NIC. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[in] attr * Flow rule attributes. * @param[in] pattern @@ -1031,7 +1014,7 @@ priv_flow_convert_finalise(struct priv *priv __rte_unused, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert(struct priv *priv, +mlx5_flow_convert(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], @@ -1048,16 +1031,16 @@ priv_flow_convert(struct priv *priv, .layer = HASH_RXQ_ETH, .mark_id = MLX5_FLOW_MARK_DEFAULT, }; - ret = priv_flow_convert_attributes(priv, attr, error, parser); + ret = mlx5_flow_convert_attributes(attr, error); if (ret) return ret; - ret = priv_flow_convert_actions(priv, actions, error, parser); + ret = mlx5_flow_convert_actions(dev, actions, error, parser); if (ret) return ret; - ret = priv_flow_convert_items_validate(priv, items, error, parser); + ret = mlx5_flow_convert_items_validate(items, error, parser); if (ret) return ret; - priv_flow_convert_finalise(priv, parser); + mlx5_flow_convert_finalise(parser); /* * Second step. * Allocate the memory space to store verbs specifications. 
@@ -1069,8 +1052,7 @@ priv_flow_convert(struct priv *priv, unsigned int offset = parser->queue[HASH_RXQ_ETH].offset; parser->queue[HASH_RXQ_ETH].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); + mlx5_flow_convert_allocate(priority, offset, error); if (!parser->queue[HASH_RXQ_ETH].ibv_attr) return ENOMEM; parser->queue[HASH_RXQ_ETH].offset = @@ -1088,7 +1070,7 @@ priv_flow_convert(struct priv *priv, continue; offset = parser->queue[i].offset; parser->queue[i].ibv_attr = - priv_flow_convert_allocate(priv, priority, + mlx5_flow_convert_allocate(priority, offset, error); if (!parser->queue[i].ibv_attr) goto exit_enomem; @@ -1116,7 +1098,7 @@ priv_flow_convert(struct priv *priv, if (parser->mark) mlx5_flow_create_flag_mark(parser, parser->mark_id); if (parser->count && parser->create) { - mlx5_flow_create_count(priv, parser); + mlx5_flow_create_count(dev, parser); if (!parser->cs) goto exit_count_error; } @@ -1125,7 +1107,7 @@ priv_flow_convert(struct priv *priv, * configuration. */ if (!parser->drop) { - priv_flow_convert_finalise(priv, parser); + mlx5_flow_convert_finalise(parser); } else { parser->queue[HASH_RXQ_ETH].ibv_attr->priority = attr->priority + @@ -1577,8 +1559,8 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) /** * Convert count action to Verbs specification. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Pointer to MLX5 flow parser structure. * @@ -1586,10 +1568,11 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) * 0 on success, errno value on failure. 
*/ static int -mlx5_flow_create_count(struct priv *priv __rte_unused, +mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, struct mlx5_flow_parse *parser __rte_unused) { #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + struct priv *priv = dev->data->dev_private; unsigned int size = sizeof(struct ibv_flow_spec_counter_action); struct ibv_counter_set_init_attr init_attr = {0}; struct ibv_flow_spec_counter_action counter = { @@ -1611,8 +1594,8 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, /** * Complete flow rule creation with a drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1624,11 +1607,12 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, * 0 on success, errno value on failure. */ static int -priv_flow_create_action_queue_drop(struct priv *priv, +mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; struct ibv_flow_spec_action_drop *drop; unsigned int size = sizeof(struct ibv_flow_spec_action_drop); int err = 0; @@ -1683,8 +1667,8 @@ priv_flow_create_action_queue_drop(struct priv *priv, /** * Create hash Rx queues when RSS is enabled. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1696,11 +1680,12 @@ priv_flow_create_action_queue_drop(struct priv *priv, * 0 on success, a errno value otherwise and rte_errno is set. 
*/ static int -priv_flow_create_action_queue_rss(struct priv *priv, +mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != hash_rxq_init_n; ++i) { @@ -1714,21 +1699,21 @@ priv_flow_create_action_queue_rss(struct priv *priv, if (!priv->dev->data->dev_started) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); + mlx5_hrxq_get(dev, + parser->rss_conf.rss_key, + parser->rss_conf.rss_key_len, + hash_fields, + parser->queues, + parser->queues_n); if (flow->frxq[i].hrxq) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); + mlx5_hrxq_new(dev, + parser->rss_conf.rss_key, + parser->rss_conf.rss_key_len, + hash_fields, + parser->queues, + parser->queues_n); if (!flow->frxq[i].hrxq) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, @@ -1742,8 +1727,8 @@ priv_flow_create_action_queue_rss(struct priv *priv, /** * Complete flow rule creation. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1755,18 +1740,19 @@ priv_flow_create_action_queue_rss(struct priv *priv, * 0 on success, a errno value otherwise and rte_errno is set. 
*/ static int -priv_flow_create_action_queue(struct priv *priv, +mlx5_flow_create_action_queue(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; int err = 0; unsigned int i; assert(priv->pd); assert(priv->ctx); assert(!parser->drop); - err = priv_flow_create_action_queue_rss(priv, parser, flow, error); + err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); if (err) goto error; if (parser->count) @@ -1807,7 +1793,7 @@ priv_flow_create_action_queue(struct priv *priv, claim_zero(mlx5_glue->destroy_flow(ibv_flow)); } if (flow->frxq[i].hrxq) - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); + mlx5_hrxq_release(dev, flow->frxq[i].hrxq); if (flow->frxq[i].ibv_attr) rte_free(flow->frxq[i].ibv_attr); } @@ -1822,8 +1808,8 @@ priv_flow_create_action_queue(struct priv *priv, /** * Convert a flow. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] attr @@ -1839,19 +1825,19 @@ priv_flow_create_action_queue(struct priv *priv, * A flow on success, NULL otherwise. 
*/ static struct rte_flow * -priv_flow_create(struct priv *priv, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +mlx5_flow_list_create(struct rte_eth_dev *dev, + struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; unsigned int i; int err; - err = priv_flow_convert(priv, attr, items, actions, error, &parser); + err = mlx5_flow_convert(dev, attr, items, actions, error, &parser); if (err) goto exit; flow = rte_calloc(__func__, 1, @@ -1875,10 +1861,10 @@ priv_flow_create(struct priv *priv, memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len); /* finalise the flow. */ if (parser.drop) - err = priv_flow_create_action_queue_drop(priv, &parser, flow, + err = mlx5_flow_create_action_queue_drop(dev, &parser, flow, error); else - err = priv_flow_create_action_queue(priv, &parser, flow, error); + err = mlx5_flow_create_action_queue(dev, &parser, flow, error); if (err) goto exit; TAILQ_INSERT_TAIL(list, flow, next); @@ -1907,11 +1893,10 @@ mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; int ret; struct mlx5_flow_parse parser = { .create = 0, }; - ret = priv_flow_convert(priv, attr, items, actions, error, &parser); + ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); return ret; } @@ -1929,28 +1914,26 @@ mlx5_flow_create(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; - struct rte_flow *flow; - flow = priv_flow_create(priv, &priv->flows, attr, items, actions, - error); - return flow; + return mlx5_flow_list_create(dev, &priv->flows, attr, items, actions, + 
error); } /** - * Destroy a flow. + * Destroy a flow in a list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] flow * Flow to destroy. */ static void -priv_flow_destroy(struct priv *priv, - struct mlx5_flows *list, - struct rte_flow *flow) +mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { + struct priv *priv = dev->data->dev_private; unsigned int i; if (flow->drop || !flow->mark) @@ -1998,7 +1981,7 @@ priv_flow_destroy(struct priv *priv, claim_zero(mlx5_glue->destroy_flow (frxq->ibv_flow)); if (frxq->hrxq) - mlx5_priv_hrxq_release(priv, frxq->hrxq); + mlx5_hrxq_release(dev, frxq->hrxq); if (frxq->ibv_attr) rte_free(frxq->ibv_attr); } @@ -2015,34 +1998,35 @@ priv_flow_destroy(struct priv *priv, /** * Destroy all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_flush(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) { while (!TAILQ_EMPTY(list)) { struct rte_flow *flow; flow = TAILQ_FIRST(list); - priv_flow_destroy(priv, list, flow); + mlx5_flow_list_destroy(dev, list, flow); } } /** * Create drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * 0 on success. */ int -priv_flow_create_drop_queue(struct priv *priv) +mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq_drop *fdq = NULL; assert(priv->pd); @@ -2123,12 +2107,13 @@ priv_flow_create_drop_queue(struct priv *priv) /** * Delete drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_flow_delete_drop_queue(struct priv *priv) +mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue; if (!fdq) @@ -2148,14 +2133,15 @@ priv_flow_delete_drop_queue(struct priv *priv) /** * Remove all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_stop(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { @@ -2198,7 +2184,7 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) claim_zero(mlx5_glue->destroy_flow (flow->frxq[i].ibv_flow)); flow->frxq[i].ibv_flow = NULL; - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); + mlx5_hrxq_release(dev, flow->frxq[i].hrxq); flow->frxq[i].hrxq = NULL; } DEBUG("Flow %p removed", (void *)flow); @@ -2208,8 +2194,8 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) /** * Add all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @@ -2217,8 +2203,9 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) * 0 on success, a errno value otherwise and rte_errno is set. 
*/ int -priv_flow_start(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; TAILQ_FOREACH(flow, list, next) { @@ -2243,19 +2230,19 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) if (!flow->frxq[i].ibv_attr) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); + mlx5_hrxq_get(dev, flow->rss_conf.rss_key, + flow->rss_conf.rss_key_len, + hash_rxq_init[i].hash_fields, + (*flow->queues), + flow->queues_n); if (flow->frxq[i].hrxq) goto flow_create; flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); + mlx5_hrxq_new(dev, flow->rss_conf.rss_key, + flow->rss_conf.rss_key_len, + hash_rxq_init[i].hash_fields, + (*flow->queues), + flow->queues_n); if (!flow->frxq[i].hrxq) { DEBUG("Flow %p cannot be applied", (void *)flow); @@ -2285,19 +2272,20 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) /** * Verify the flow list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return the number of flows not released. 
*/ int -priv_flow_verify(struct priv *priv) +mlx5_flow_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; int ret = 0; TAILQ_FOREACH(flow, &priv->flows, next) { - DEBUG("%p: flow %p still referenced", (void *)priv, + DEBUG("%p: flow %p still referenced", (void *)dev, (void *)flow); ++ret; } @@ -2378,8 +2366,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, action_rss.local.rss_conf = &priv->rss_conf; action_rss.local.num = priv->reta_idx_n; actions[0].conf = (const void *)&action_rss.rss; - flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions, - &error); + flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, + actions, &error); if (!flow) return rte_errno; return 0; @@ -2419,7 +2407,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_flow_destroy(priv, &priv->flows, flow); + mlx5_flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2435,7 +2423,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_flow_flush(priv, &priv->flows); + mlx5_flow_list_flush(dev, &priv->flows); return 0; } @@ -2452,7 +2440,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, * 0 on success, a errno value otherwise and rte_errno is set. */ static int -priv_flow_query_count(struct ibv_counter_set *cs, +mlx5_flow_query_count(struct ibv_counter_set *cs, struct mlx5_flow_counter_stats *counter_stats, struct rte_flow_query_count *query_count, struct rte_flow_error *error) @@ -2502,7 +2490,7 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, int res = EINVAL; if (flow->cs) { - res = priv_flow_query_count(flow->cs, + res = mlx5_flow_query_count(flow->cs, &flow->counter_stats, (struct rte_flow_query_count *)data, error); @@ -2547,8 +2535,8 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, /** * Convert a flow director filter to a generic flow. * - * @param priv - * Private structure. 
+ * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * @param attributes @@ -2558,10 +2546,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, * 0 on success, errno value on error. */ static int -priv_fdir_filter_convert(struct priv *priv, +mlx5_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { + struct priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; /* Validate queue number. */ @@ -2733,8 +2722,8 @@ priv_fdir_filter_convert(struct priv *priv, /** * Add new flow director filter and store it in list. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * @@ -2742,9 +2731,10 @@ priv_fdir_filter_convert(struct priv *priv, * 0 on success, errno value on failure. */ static int -priv_fdir_filter_add(struct priv *priv, +mlx5_fdir_filter_add(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { + struct priv *priv = dev->data->dev_private; struct mlx5_fdir attributes = { .attr.group = 0, .l2_mask = { @@ -2760,19 +2750,16 @@ priv_fdir_filter_add(struct priv *priv, struct rte_flow *flow; int ret; - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); + ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, + ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) return -ret; - flow = priv_flow_create(priv, - &priv->flows, - &attributes.attr, - attributes.items, - attributes.actions, - &error); + flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, + attributes.items, attributes.actions, + &error); if (flow) { DEBUG("FDIR created %p", (void *)flow); return 0; @@ -2783,8 +2770,8 @@ priv_fdir_filter_add(struct priv *priv, /** * 
Delete specific filter. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Filter to be deleted. * @@ -2792,9 +2779,10 @@ priv_fdir_filter_add(struct priv *priv, * 0 on success, errno value on failure. */ static int -priv_fdir_filter_delete(struct priv *priv, +mlx5_fdir_filter_delete(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { + struct priv *priv = dev->data->dev_private; struct mlx5_fdir attributes = { .attr.group = 0, }; @@ -2807,10 +2795,10 @@ priv_fdir_filter_delete(struct priv *priv, unsigned int i; int ret; - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); + ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, + ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) goto exit; @@ -2868,7 +2856,7 @@ priv_fdir_filter_delete(struct priv *priv, continue; } if (flow) - priv_flow_destroy(priv, &priv->flows, flow); + mlx5_flow_list_destroy(dev, &priv->flows, flow); exit: for (i = 0; i != hash_rxq_init_n; ++i) { if (parser.queue[i].ibv_attr) @@ -2880,8 +2868,8 @@ priv_fdir_filter_delete(struct priv *priv, /** * Update queue for specific filter. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Filter to be updated. * @@ -2889,41 +2877,44 @@ priv_fdir_filter_delete(struct priv *priv, * 0 on success, errno value on failure. */ static int -priv_fdir_filter_update(struct priv *priv, +mlx5_fdir_filter_update(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { int ret; - ret = priv_fdir_filter_delete(priv, fdir_filter); + ret = mlx5_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - ret = priv_fdir_filter_add(priv, fdir_filter); + ret = mlx5_fdir_filter_add(dev, fdir_filter); return ret; } /** * Flush all filters. 
* - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. */ static void -priv_fdir_filter_flush(struct priv *priv) +mlx5_fdir_filter_flush(struct rte_eth_dev *dev) { - priv_flow_flush(priv, &priv->flows); + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->flows); } /** * Get flow director information. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] fdir_info * Resulting flow director information. */ static void -priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) +mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) { + struct priv *priv = dev->data->dev_private; struct rte_eth_fdir_masks *mask = &priv->dev->data->dev_conf.fdir_conf.mask; @@ -2941,8 +2932,8 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) /** * Deal with flow director operations. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param filter_op * Operation to perform. * @param arg @@ -2952,8 +2943,10 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) * 0 on success, errno value on failure. 
*/ static int -priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) +mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, + void *arg) { + struct priv *priv = dev->data->dev_private; enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode; int ret = 0; @@ -2963,27 +2956,27 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) if (fdir_mode != RTE_FDIR_MODE_PERFECT && fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { ERROR("%p: flow director mode %d not supported", - (void *)priv, fdir_mode); + (void *)dev, fdir_mode); return EINVAL; } switch (filter_op) { case RTE_ETH_FILTER_ADD: - ret = priv_fdir_filter_add(priv, arg); + ret = mlx5_fdir_filter_add(dev, arg); break; case RTE_ETH_FILTER_UPDATE: - ret = priv_fdir_filter_update(priv, arg); + ret = mlx5_fdir_filter_update(dev, arg); break; case RTE_ETH_FILTER_DELETE: - ret = priv_fdir_filter_delete(priv, arg); + ret = mlx5_fdir_filter_delete(dev, arg); break; case RTE_ETH_FILTER_FLUSH: - priv_fdir_filter_flush(priv); + mlx5_fdir_filter_flush(dev); break; case RTE_ETH_FILTER_INFO: - priv_fdir_info_get(priv, arg); + mlx5_fdir_info_get(dev, arg); break; default: - DEBUG("%p: unknown operation %u", (void *)priv, + DEBUG("%p: unknown operation %u", (void *)dev, filter_op); ret = EINVAL; break; @@ -3013,7 +3006,6 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, void *arg) { int ret = EINVAL; - struct priv *priv = dev->data->dev_private; switch (filter_type) { case RTE_ETH_FILTER_GENERIC: @@ -3022,7 +3014,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - ret = priv_fdir_ctrl_func(priv, filter_op, arg); + ret = mlx5_fdir_ctrl_func(dev, filter_op, arg); break; default: ERROR("%p: filter type (%d) not supported", diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index a529dfeac..91c977bc5 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ 
b/drivers/net/mlx5/mlx5_mac.c @@ -35,8 +35,8 @@ /** * Get MAC address by querying netdevice. * - * @param[in] priv - * struct priv for the requested device. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] mac * MAC address output buffer. * @@ -44,11 +44,11 @@ * 0 on success, -1 on failure and errno is set. */ int -priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) +mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]) { struct ifreq request; - if (priv_ifreq(priv, SIOCGIFHWADDR, &request)) + if (mlx5_ifreq(dev, SIOCGIFHWADDR, &request)) return -1; memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); return 0; diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 3b7b6d140..fe60dd132 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -83,10 +83,6 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * Register a Memory Region (MR) <-> Memory Pool (MP) association in * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. * - * This function should only be called by txq_mp2mr(). - * - * @param priv - * Pointer to private structure. * @param txq * Pointer to TX queue structure. * @param[in] mp @@ -98,29 +94,35 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * mr on success, NULL on failure. */ struct mlx5_mr * -priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, - struct rte_mempool *mp, unsigned int idx) +mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, + unsigned int idx) { struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + struct rte_eth_dev *dev; struct mlx5_mr *mr; + rte_spinlock_lock(&txq_ctrl->priv->mr_lock); /* Add a new entry, register MR first. 
*/ DEBUG("%p: discovered new memory pool \"%s\" (%p)", (void *)txq_ctrl, mp->name, (void *)mp); - mr = priv_mr_get(priv, mp); + dev = txq_ctrl->priv->dev; + mr = mlx5_mr_get(dev, mp); if (mr == NULL) { if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - DEBUG("Using unregistered mempool 0x%p(%s) in secondary process," - " please create mempool before rte_eth_dev_start()", + DEBUG("Using unregistered mempool 0x%p(%s) in " + "secondary process, please create mempool before " + " rte_eth_dev_start()", (void *)mp, mp->name); + rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return NULL; } - mr = priv_mr_new(priv, mp); + mr = mlx5_mr_new(dev, mp); } if (unlikely(mr == NULL)) { DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", (void *)txq_ctrl); + rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return NULL; } if (unlikely(idx == RTE_DIM(txq->mp2mr))) { @@ -128,7 +130,7 @@ priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, DEBUG("%p: MR <-> MP table full, dropping oldest entry.", (void *)txq_ctrl); --idx; - priv_mr_release(priv, txq->mp2mr[0]); + mlx5_mr_release(txq->mp2mr[0]); memmove(&txq->mp2mr[0], &txq->mp2mr[1], (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); } @@ -137,35 +139,6 @@ priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, (void *)txq_ctrl, mp->name, (void *)mp, txq_ctrl->txq.mp2mr[idx]->lkey); - return mr; -} - -/** - * Register a Memory Region (MR) <-> Memory Pool (MP) association in - * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. - * - * This function should only be called by txq_mp2mr(). - * - * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. - * @param idx - * Index of the next available entry. - * - * @return - * mr on success, NULL on failure. 
- */ -struct mlx5_mr* -mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, - unsigned int idx) -{ - struct mlx5_txq_ctrl *txq_ctrl = - container_of(txq, struct mlx5_txq_ctrl, txq); - struct mlx5_mr *mr; - - rte_spinlock_lock(&txq_ctrl->priv->mr_lock); - mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx); rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); return mr; } @@ -225,20 +198,20 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || data.ret == -1) return; - mr = priv_mr_get(priv, mp); + mr = mlx5_mr_get(priv->dev, mp); if (mr) { - priv_mr_release(priv, mr); + mlx5_mr_release(mr); return; } - priv_mr_new(priv, mp); + mlx5_mr_new(priv->dev, mp); } /** * Register a new memory region from the mempool and store it in the memory * region list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param mp * Pointer to the memory pool to register. * @@ -246,8 +219,9 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * The memory region on success. */ struct mlx5_mr * -priv_mr_new(struct priv *priv, struct rte_mempool *mp) +mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) { + struct priv *priv = dev->data->dev_private; const struct rte_memseg *ms = rte_eal_get_physmem_layout(); uintptr_t start; uintptr_t end; @@ -289,7 +263,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) mr->mp = mp; mr->lkey = rte_cpu_to_be_32(mr->mr->lkey); rte_atomic32_inc(&mr->refcnt); - DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv, + DEBUG("%p: new Memory Region %p refcnt: %d", (void *)dev, (void *)mr, rte_atomic32_read(&mr->refcnt)); LIST_INSERT_HEAD(&priv->mr, mr, next); return mr; @@ -298,8 +272,8 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) /** * Search the memory region object in the memory region list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* @param mp * Pointer to the memory pool to register. * @@ -307,8 +281,9 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp) * The memory region on success. */ struct mlx5_mr * -priv_mr_get(struct priv *priv, struct rte_mempool *mp) +mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp) { + struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; assert(mp); @@ -335,7 +310,7 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp) * 0 on success, errno on failure. */ int -priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) +mlx5_mr_release(struct mlx5_mr *mr) { assert(mr); DEBUG("Memory Region %p refcnt: %d", @@ -352,20 +327,21 @@ priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr) /** * Verify the flow list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -priv_mr_verify(struct priv *priv) +mlx5_mr_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_mr *mr; LIST_FOREACH(mr, &priv->mr, next) { - DEBUG("%p: mr %p still referenced", (void *)priv, + DEBUG("%p: mr %p still referenced", (void *)dev, (void *)mr); ++ret; } diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index 4b88215fb..a654a5a7d 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -96,8 +96,8 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, /** * Allocate/reallocate RETA index table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @praram reta_size * The size of the array to allocate. * @@ -105,8 +105,9 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, * 0 on success, errno value on failure. 
*/ int -priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) +mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) { + struct priv *priv = dev->data->dev_private; void *mem; unsigned int old_size = priv->reta_idx_n; @@ -127,28 +128,29 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) } /** - * Query RETA table. + * DPDK callback to get the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in, out] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries. + * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ -static int -priv_dev_rss_reta_query(struct priv *priv, +int +mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; if (!reta_size || reta_size > priv->reta_idx_n) - return EINVAL; + return -EINVAL; /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -159,31 +161,32 @@ priv_dev_rss_reta_query(struct priv *priv, } /** - * Update RETA table. + * DPDK callback to update the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries. + * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. 
*/ -static int -priv_dev_rss_reta_update(struct priv *priv, +int +mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + int ret; + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; unsigned int pos; - int ret; if (!reta_size) - return EINVAL; - ret = priv_rss_reta_index_resize(priv, reta_size); + return -EINVAL; + ret = mlx5_rss_reta_index_resize(dev, reta_size); if (ret) return ret; for (idx = 0, i = 0; (i != reta_size); ++i) { @@ -194,56 +197,6 @@ priv_dev_rss_reta_update(struct priv *priv, assert(reta_conf[idx].reta[pos] < priv->rxqs_n); (*priv->reta_idx)[i] = reta_conf[idx].reta[pos]; } - return 0; -} - -/** - * DPDK callback to get the RETA indirection table. - * - * @param dev - * Pointer to Ethernet device structure. - * @param reta_conf - * Pointer to RETA configuration structure array. - * @param reta_size - * Size of the RETA table. - * - * @return - * 0 on success, negative errno value on failure. - */ -int -mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) -{ - int ret; - struct priv *priv = dev->data->dev_private; - - ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size); - return -ret; -} - -/** - * DPDK callback to update the RETA indirection table. - * - * @param dev - * Pointer to Ethernet device structure. - * @param reta_conf - * Pointer to RETA configuration structure array. - * @param reta_size - * Size of the RETA table. - * - * @return - * 0 on success, negative errno value on failure. 
- */ -int -mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) -{ - int ret; - struct priv *priv = dev->data->dev_private; - - ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size); if (dev->data->dev_started) { mlx5_dev_stop(dev); mlx5_dev_start(dev); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index de3335cb9..a3b08a1a3 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -97,7 +97,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) (*rxq_ctrl->rxq.elts)[i] = buf; } /* If Rx vector is activated. */ - if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; int j; @@ -156,7 +156,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) * Some mbuf in the Ring belongs to the application. They cannot be * freed. */ - if (rxq_check_vec_support(rxq) > 0) { + if (mlx5_rxq_check_vec_support(rxq) > 0) { for (i = 0; i < used; ++i) (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; rxq->rq_pi = rxq->rq_ci; @@ -181,22 +181,23 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) { DEBUG("cleaning up %p", (void *)rxq_ctrl); if (rxq_ctrl->ibv) - mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); } /** * Returns the per-queue supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Rx offloads. 
*/ uint64_t -mlx5_priv_get_rx_queue_offloads(struct priv *priv) +mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_TIMESTAMP | @@ -217,13 +218,11 @@ mlx5_priv_get_rx_queue_offloads(struct priv *priv) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. * @return * Supported Rx offloads. */ uint64_t -mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) +mlx5_get_rx_port_offloads(void) { uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; @@ -233,8 +232,8 @@ mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) /** * Checks if the per-queue offload configuration is valid. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param offloads * Per-queue offloads configuration. * @@ -242,12 +241,11 @@ mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) * 1 if the configuration is valid, 0 otherwise. 
*/ static int -priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) +mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) { - uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads; - uint64_t queue_supp_offloads = - mlx5_priv_get_rx_queue_offloads(priv); - uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv); + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads; + uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev); + uint64_t port_supp_offloads = mlx5_get_rx_port_offloads(); if ((offloads & (queue_supp_offloads | port_supp_offloads)) != offloads) @@ -299,24 +297,24 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, idx, priv->rxqs_n); return -EOVERFLOW; } - if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) { + if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) { ret = ENOTSUP; ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.rxmode.offloads, - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv))); + (mlx5_get_rx_port_offloads() | + mlx5_get_rx_queue_offloads(dev))); goto out; } - if (!mlx5_priv_rxq_releasable(priv, idx)) { + if (!mlx5_rxq_releasable(dev, idx)) { ret = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, idx); goto out; } - mlx5_priv_rxq_release(priv, idx); - rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp); + mlx5_rxq_release(dev, idx); + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); if (!rxq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); @@ -347,24 +345,25 @@ mlx5_rx_queue_release(void *dpdk_rxq) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) + if (!mlx5_rxq_releasable(priv->dev, 
rxq_ctrl->rxq.stats.idx)) rte_panic("Rx queue %p is still used by a flow and cannot be" " removed\n", (void *)rxq_ctrl); - mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); + mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx); } /** * Allocate queue vector and fill epoll fd list for Rx interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * 0 on success, negative on failure. */ int -priv_rx_intr_vec_enable(struct priv *priv) +mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); @@ -373,7 +372,7 @@ priv_rx_intr_vec_enable(struct priv *priv) if (!priv->dev->data->dev_conf.intr_conf.rxq) return 0; - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); if (intr_handle->intr_vec == NULL) { ERROR("failed to allocate memory for interrupt vector," @@ -383,7 +382,7 @@ priv_rx_intr_vec_enable(struct priv *priv) intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { /* This rxq ibv must not be released in this function. 
*/ - struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i); + struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i); int fd; int flags; int rc; @@ -400,7 +399,7 @@ priv_rx_intr_vec_enable(struct priv *priv) ERROR("too many Rx queues for interrupt vector size" " (%d), Rx interrupts cannot be enabled", RTE_MAX_RXTX_INTR_VEC_ID); - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); return -1; } fd = rxq_ibv->channel->fd; @@ -409,7 +408,7 @@ priv_rx_intr_vec_enable(struct priv *priv) if (rc < 0) { ERROR("failed to make Rx interrupt file descriptor" " %d non-blocking for queue index %d", fd, i); - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); return -1; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; @@ -417,7 +416,7 @@ priv_rx_intr_vec_enable(struct priv *priv) count++; } if (!count) - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); else intr_handle->nb_efd = count; return 0; @@ -426,12 +425,13 @@ priv_rx_intr_vec_enable(struct priv *priv) /** * Clean up Rx interrupts handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_rx_intr_vec_disable(struct priv *priv) +mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_intr_handle *intr_handle = priv->dev->intr_handle; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; @@ -454,7 +454,7 @@ priv_rx_intr_vec_disable(struct priv *priv) */ rxq_data = (*priv->rxqs)[i]; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); } free: rte_intr_free_epoll_fd(intr_handle); @@ -516,13 +516,13 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (rxq_ctrl->irq) { struct mlx5_rxq_ibv *rxq_ibv; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { ret = EINVAL; goto exit; } mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + mlx5_rxq_ibv_release(rxq_ibv); } exit: if (ret) @@ -560,7 +560,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (!rxq_ctrl->irq) goto exit; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { ret = EINVAL; goto exit; @@ -574,7 +574,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); exit: if (rxq_ibv) - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + mlx5_rxq_ibv_release(rxq_ibv); if (ret) WARN("unable to disable interrupt on rx queue %d", rx_queue_id); @@ -584,8 +584,8 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) /** * Create the Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* @param idx * Queue index in DPDK Rx queue array * @@ -593,8 +593,9 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * The Verbs object initialised if it can be created. */ struct mlx5_rxq_ibv * -mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) +mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); @@ -629,9 +630,9 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) } tmpl->rxq_ctrl = rxq_ctrl; /* Use the entire RX mempool as the memory region. */ - tmpl->mr = priv_mr_get(priv, rxq_data->mp); + tmpl->mr = mlx5_mr_get(dev, rxq_data->mp); if (!tmpl->mr) { - tmpl->mr = priv_mr_new(priv, rxq_data->mp); + tmpl->mr = mlx5_mr_new(dev, rxq_data->mp); if (!tmpl->mr) { ERROR("%p: MR creation failure", (void *)rxq_ctrl); goto error; @@ -661,7 +662,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. 
*/ - if (rxq_check_vec_support(rxq_data) < 0) + if (mlx5_rxq_check_vec_support(rxq_data) < 0) attr.cq.ibv.cqe *= 2; } else if (config->cqe_comp && rxq_data->hw_timestamp) { DEBUG("Rx CQE compression is disabled for HW timestamp"); @@ -781,7 +782,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci); DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev, (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; @@ -794,7 +795,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) if (tmpl->channel) claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); if (tmpl->mr) - priv_mr_release(priv, tmpl->mr); + mlx5_mr_release(tmpl->mr); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return NULL; } @@ -802,8 +803,8 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) /** * Get an Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * @@ -811,8 +812,9 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) * The Verbs object if it exists. 
*/ struct mlx5_rxq_ibv * -mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) +mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl; @@ -822,9 +824,9 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) return NULL; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->ibv) { - priv_mr_get(priv, rxq_data->mp); + mlx5_mr_get(dev, rxq_data->mp); rte_atomic32_inc(&rxq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl->ibv, rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); } @@ -834,8 +836,6 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Rx verbs queue object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. * @@ -843,7 +843,7 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) * 0 on success, errno value on failure. */ int -mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) { int ret; @@ -851,10 +851,10 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) assert(rxq_ibv->wq); assert(rxq_ibv->cq); assert(rxq_ibv->mr); - ret = priv_mr_release(priv, rxq_ibv->mr); + ret = mlx5_mr_release(rxq_ibv->mr); if (!ret) rxq_ibv->mr = NULL; - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + DEBUG("Verbs Rx queue %p: refcnt %d", (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) { rxq_free_elts(rxq_ibv->rxq_ctrl); @@ -873,20 +873,21 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) /** * Verify the Verbs Rx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* * @return * The number of object not released. */ int -mlx5_priv_rxq_ibv_verify(struct priv *priv) +mlx5_rxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_rxq_ibv *rxq_ibv; LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) { - DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv, + DEBUG("%p: Verbs Rx queue %p still referenced", (void *)dev, (void *)rxq_ibv); ++ret; } @@ -896,14 +897,11 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv) /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. */ int -mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, - struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv) { assert(rxq_ibv); return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); @@ -912,8 +910,8 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, /** * Create a DPDK Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @param desc @@ -925,11 +923,11 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused, * A DPDK queue object on success. 
*/ struct mlx5_rxq_ctrl * -mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) +mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); struct mlx5_dev_config *config = &priv->config; @@ -1029,7 +1027,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; @@ -1041,8 +1039,8 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, /** * Get a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -1050,17 +1048,18 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, * A pointer to the queue if it exists. 
*/ struct mlx5_rxq_ctrl * -mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) +mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; if ((*priv->rxqs)[idx]) { rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_get(priv, idx); + mlx5_rxq_ibv_get(dev, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); } return rxq_ctrl; @@ -1069,8 +1068,8 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) /** * Release a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -1078,8 +1077,9 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) * 0 on success, errno value on failure. */ int -mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) +mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) @@ -1089,11 +1089,11 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) if (rxq_ctrl->ibv) { int ret; - ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + ret = mlx5_rxq_ibv_release(rxq_ctrl->ibv); if (!ret) rxq_ctrl->ibv = NULL; } - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { LIST_REMOVE(rxq_ctrl, next); @@ -1107,8 +1107,8 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. 
* @@ -1116,8 +1116,9 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) * 1 if the queue can be released. */ int -mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) +mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) @@ -1129,20 +1130,21 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_rxq_verify(struct priv *priv) +mlx5_rxq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { - DEBUG("%p: Rx Queue %p still referenced", (void *)priv, + DEBUG("%p: Rx Queue %p still referenced", (void *)dev, (void *)rxq_ctrl); ++ret; } @@ -1152,8 +1154,8 @@ mlx5_priv_rxq_verify(struct priv *priv) /** * Create an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n @@ -1163,9 +1165,10 @@ mlx5_priv_rxq_verify(struct priv *priv) * A new indirection table. */ struct mlx5_ind_table_ibv * -mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], + uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; const unsigned int wq_n = rte_is_power_of_2(queues_n) ? 
log2above(queues_n) : @@ -1179,8 +1182,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], if (!ind_tbl) return NULL; for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq = - mlx5_priv_rxq_get(priv, queues[i]); + struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]); if (!rxq) goto error; @@ -1202,20 +1204,20 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], goto error; rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, + DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); return ind_tbl; error: rte_free(ind_tbl); - DEBUG("%p cannot create indirection table", (void *)priv); + DEBUG("%p cannot create indirection table", (void *)dev); return NULL; } /** * Get an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n @@ -1225,9 +1227,10 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], * An indirection table if found. 
*/ struct mlx5_ind_table_ibv * -mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[], + uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { @@ -1241,10 +1244,10 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], unsigned int i; rte_atomic32_inc(&ind_tbl->refcnt); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, + DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); for (i = 0; i != ind_tbl->queues_n; ++i) - mlx5_priv_rxq_get(priv, ind_tbl->queues[i]); + mlx5_rxq_get(dev, ind_tbl->queues[i]); } return ind_tbl; } @@ -1252,8 +1255,8 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], /** * Release an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param ind_table * Indirection table to release. * @@ -1261,18 +1264,18 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], * 0 on success, errno value on failure. 
*/ int -mlx5_priv_ind_table_ibv_release(struct priv *priv, - struct mlx5_ind_table_ibv *ind_tbl) +mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl) { unsigned int i; - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, + DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) claim_zero(mlx5_glue->destroy_rwq_ind_table (ind_tbl->ind_table)); for (i = 0; i != ind_tbl->queues_n; ++i) - claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i])); + claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i])); if (!rte_atomic32_read(&ind_tbl->refcnt)) { LIST_REMOVE(ind_tbl, next); rte_free(ind_tbl); @@ -1284,21 +1287,22 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv, /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_ind_table_ibv_verify(struct priv *priv) +mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; int ret = 0; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { DEBUG("%p: Verbs indirection table %p still referenced", - (void *)priv, (void *)ind_tbl); + (void *)dev, (void *)ind_tbl); ++ret; } return ret; @@ -1307,8 +1311,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) /** * Create an Rx Hash queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param rss_key * RSS key for the Rx hash queue. * @param rss_key_len @@ -1325,17 +1329,18 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) * An hash Rx queue on success. 
*/ struct mlx5_hrxq * -mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, + uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; queues_n = hash_fields ? queues_n : 1; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) - ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n); if (!ind_tbl) return NULL; qp = mlx5_glue->create_qp_ex @@ -1367,11 +1372,11 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, memcpy(hrxq->rss_key, rss_key, rss_key_len); rte_atomic32_inc(&hrxq->refcnt); LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + mlx5_ind_table_ibv_release(dev, ind_tbl); if (qp) claim_zero(mlx5_glue->destroy_qp(qp)); return NULL; @@ -1380,8 +1385,8 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, /** * Get an Rx Hash queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param rss_conf * RSS configuration for the Rx hash queue. * @param queues @@ -1394,9 +1399,10 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, * An hash Rx queue on success. 
*/ struct mlx5_hrxq * -mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, + uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; queues_n = hash_fields ? queues_n : 1; @@ -1409,15 +1415,15 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, continue; if (hrxq->hash_fields != hash_fields) continue; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) continue; if (ind_tbl != hrxq->ind_table) { - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + mlx5_ind_table_ibv_release(dev, ind_tbl); continue; } rte_atomic32_inc(&hrxq->refcnt); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; } @@ -1427,8 +1433,8 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, /** * Release the hash Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param hrxq * Pointer to Hash Rx queue to release. * @@ -1436,39 +1442,40 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, * 0 on success, errno value on failure. 
*/ int -mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) +mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) { - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); - mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table); + mlx5_ind_table_ibv_release(dev, hrxq->ind_table); LIST_REMOVE(hrxq, next); rte_free(hrxq); return 0; } - claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table)); + claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table)); return EBUSY; } /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_hrxq_ibv_verify(struct priv *priv) +mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; int ret = 0; LIST_FOREACH(hrxq, &priv->hrxqs, next) { DEBUG("%p: Verbs Hash Rx queue %p still referenced", - (void *)priv, (void *)hrxq); + (void *)dev, (void *)hrxq); ++ret; } return ret; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 93d794ede..11dd1b84e 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -1962,27 +1962,25 @@ mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, } int __attribute__((weak)) -priv_check_raw_vec_tx_support(struct priv *priv __rte_unused, - struct rte_eth_dev *dev __rte_unused) +mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_tx_support(struct priv *priv __rte_unused, - struct rte_eth_dev *dev __rte_unused) +mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } int __attribute__((weak)) 
-rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) { return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_rx_support(struct priv *priv __rte_unused) +mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index d0ec9a214..17a6072e2 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -215,67 +215,64 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp); void mlx5_rx_queue_release(void *dpdk_rxq); -int priv_rx_intr_vec_enable(struct priv *priv); -void priv_rx_intr_vec_disable(struct priv *priv); +int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev); +void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev); int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv); -int mlx5_priv_rxq_ibv_releasable(struct priv *priv, - struct mlx5_rxq_ibv *rxq_ibv); -int mlx5_priv_rxq_ibv_verify(struct priv *priv); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, - uint16_t desc, - unsigned int socket, - const struct rte_eth_rxconf *conf, - struct rte_mempool *mp); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_release(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx); -int mlx5_priv_rxq_verify(struct priv *priv); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_rxq_ibv 
*mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_verify(struct rte_eth_dev *dev); int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *priv, - uint16_t queues[], - uint16_t queues_n); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *priv, - uint16_t queues[], - uint16_t queues_n); -int mlx5_priv_ind_table_ibv_release(struct priv *priv, - struct mlx5_ind_table_ibv *ind_tbl); -int mlx5_priv_ind_table_ibv_verify(struct priv *priv); -struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, - uint8_t rss_key_len, uint64_t hash_fields, - uint16_t queues[], uint16_t queues_n); -struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, - uint8_t rss_key_len, uint64_t hash_fields, - uint16_t queues[], uint16_t queues_n); -int mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq); -int mlx5_priv_hrxq_ibv_verify(struct priv *priv); -uint64_t mlx5_priv_get_rx_port_offloads(struct priv *priv); -uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *priv); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, + uint16_t queues[], + uint16_t queues_n); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, + uint16_t queues[], + uint16_t queues_n); +int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl); +int 
mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, + uint8_t rss_key_len, uint64_t hash_fields, + uint16_t queues[], uint16_t queues_n); +int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq); +int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev); +uint64_t mlx5_get_rx_port_offloads(void); +uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev); /* mlx5_txq.c */ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_txconf *conf); void mlx5_tx_queue_release(void *dpdk_txq); -int priv_tx_uar_remap(struct priv *priv, int fd); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv); -int mlx5_priv_txq_ibv_releasable(struct priv *priv, - struct mlx5_txq_ibv *txq_ibv); -int mlx5_priv_txq_ibv_verify(struct priv *priv); -struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *priv, uint16_t idx, - uint16_t desc, unsigned int socket, - const struct rte_eth_txconf *conf); -struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_release(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx); -int mlx5_priv_txq_verify(struct priv *priv); +int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd); +struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_verify(struct rte_eth_dev 
*dev); +struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_verify(struct rte_eth_dev *dev); void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl); -uint64_t mlx5_priv_get_tx_port_offloads(struct priv *priv); +uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev); /* mlx5_rxtx.c */ @@ -299,26 +296,22 @@ int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset); int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset); /* Vectorized version of mlx5_rxtx.c */ - -int priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); -int priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev); -int rxq_check_vec_support(struct mlx5_rxq_data *rxq); -int priv_check_vec_rx_support(struct priv *priv); +int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev); +int mlx5_check_vec_tx_support(struct rte_eth_dev *dev); +int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data); +int mlx5_check_vec_rx_support(struct rte_eth_dev *dev); uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n); uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n); -uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, +uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n); /* mlx5_mr.c */ void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg); -struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, - struct rte_mempool *mp, unsigned int idx); struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, - struct rte_mempool *mp, - unsigned int idx); + struct rte_mempool *mp, unsigned int idx); #ifndef NDEBUG /** 
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c index b66c2916f..257d7b11c 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec.c @@ -223,17 +223,14 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /** * Check Tx queue flags are set for raw vectorized Tx. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, - struct rte_eth_dev *dev) +mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; @@ -246,17 +243,16 @@ priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, /** * Check a device can support vectorized TX. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +mlx5_check_vec_tx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = dev->data->dev_conf.txmode.offloads; if (!priv->config.tx_vec_en || @@ -277,7 +273,7 @@ priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -rxq_check_vec_support(struct mlx5_rxq_data *rxq) +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq) { struct mlx5_rxq_ctrl *ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); @@ -290,15 +286,16 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq) /** * Check a device can support vectorized RX. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_rx_support(struct priv *priv) +mlx5_check_vec_rx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint16_t i; if (!priv->config.rx_vec_en) @@ -309,7 +306,7 @@ priv_check_vec_rx_support(struct priv *priv) if (!rxq) continue; - if (rxq_check_vec_support(rxq) < 0) + if (mlx5_rxq_check_vec_support(rxq) < 0) break; } if (i != priv->rxqs_n) diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 61c1a4a50..b8f610df3 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -18,15 +18,16 @@ /** * Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. */ int -priv_socket_init(struct priv *priv) +mlx5_socket_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; @@ -79,15 +80,17 @@ priv_socket_init(struct priv *priv) /** * Un-Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return * 0 on success, errno value on failure. */ int -priv_socket_uninit(struct priv *priv) +mlx5_socket_uninit(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; @@ -98,12 +101,13 @@ priv_socket_uninit(struct priv *priv) /** * Handle socket interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_socket_handle(struct priv *priv) +mlx5_socket_handle(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int conn_sock; int ret = 0; struct cmsghdr *cmsg = NULL; @@ -179,15 +183,16 @@ priv_socket_handle(struct priv *priv) /** * Connect to the primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet structure. * * @return * fd on success, negative errno value on failure. */ int -priv_socket_connect(struct priv *priv) +mlx5_socket_connect(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 39be1865a..0febed878 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -122,8 +122,8 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); /** * Read device counters table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Counters table output buffer. * @@ -131,8 +131,9 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); * 0 on success and stats is filled, negative on error. 
*/ static int -priv_read_dev_counters(struct priv *priv, uint64_t *stats) +mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; struct ifreq ifr; @@ -143,7 +144,7 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats) et_stats->cmd = ETHTOOL_GSTATS; et_stats->n_stats = xstats_ctrl->stats_n; ifr.ifr_data = (caddr_t)et_stats; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { WARN("unable to read statistic values from device"); return -1; } @@ -173,20 +174,20 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats) /** * Query the number of statistics provided by ETHTOOL. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Number of statistics on success, -1 on error. */ static int -priv_ethtool_get_stats_n(struct priv *priv) { +mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { struct ethtool_drvinfo drvinfo; struct ifreq ifr; drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { WARN("unable to query number of statistics"); return -1; } @@ -196,12 +197,13 @@ priv_ethtool_get_stats_n(struct priv *priv) { /** * Init the structures to read device counters. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_xstats_init(struct priv *priv) +mlx5_xstats_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; unsigned int j; @@ -210,7 +212,7 @@ priv_xstats_init(struct priv *priv) unsigned int dev_stats_n; unsigned int str_sz; - dev_stats_n = priv_ethtool_get_stats_n(priv); + dev_stats_n = mlx5_ethtool_get_stats_n(dev); if (dev_stats_n < 1) { WARN("no extended statistics available"); return; @@ -229,7 +231,7 @@ priv_xstats_init(struct priv *priv) strings->string_set = ETH_SS_STATS; strings->len = dev_stats_n; ifr.ifr_data = (caddr_t)strings; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { WARN("unable to get statistic names"); goto free; } @@ -258,61 +260,55 @@ priv_xstats_init(struct priv *priv) } /* Copy to base at first time. */ assert(xstats_n <= MLX5_MAX_XSTATS); - priv_read_dev_counters(priv, xstats_ctrl->base); + mlx5_read_dev_counters(dev, xstats_ctrl->base); free: rte_free(strings); } /** - * Get device extended statistics. + * DPDK callback to get extended device statistics. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Pointer to rte extended stats table. + * @param n + * The size of the stats table. * * @return * Number of extended stats on success and stats is filled, * negative on error. 
*/ -static int -priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats) +int +mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct priv *priv = dev->data->dev_private; unsigned int i; - unsigned int n = xstats_n; uint64_t counters[n]; + int ret = 0; - if (priv_read_dev_counters(priv, counters) < 0) - return -1; - for (i = 0; i != xstats_n; ++i) { - stats[i].id = i; - stats[i].value = (counters[i] - xstats_ctrl->base[i]); + if (n >= xstats_n && stats) { + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) + return -1; + if (xstats_ctrl->stats_n != stats_n) + mlx5_xstats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) + return ret; + for (i = 0; i != xstats_n; ++i) { + stats[i].id = i; + stats[i].value = (counters[i] - xstats_ctrl->base[i]); + } } return n; } /** - * Reset device extended statistics. - * - * @param priv - * Pointer to private structure. - */ -static void -priv_xstats_reset(struct priv *priv) -{ - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - unsigned int i; - unsigned int n = xstats_n; - uint64_t counters[n]; - - if (priv_read_dev_counters(priv, counters) < 0) - return; - for (i = 0; i != n; ++i) - xstats_ctrl->base[i] = counters[i]; -} - -/** * DPDK callback to get device statistics. * * @param dev @@ -409,41 +405,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) } /** - * DPDK callback to get extended device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - * @param[out] stats - * Stats table output buffer. - * @param n - * The size of the stats table. - * - * @return - * Number of xstats on success, negative on failure. 
- */ -int -mlx5_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstat *stats, unsigned int n) -{ - struct priv *priv = dev->data->dev_private; - int ret = xstats_n; - - if (n >= xstats_n && stats) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - int stats_n; - - stats_n = priv_ethtool_get_stats_n(priv); - if (stats_n < 0) { - return -1; - } - if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - ret = priv_xstats_get(priv, stats); - } - return ret; -} - -/** * DPDK callback to clear device extended statistics. * * @param dev @@ -455,13 +416,19 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; + unsigned int i; + unsigned int n = xstats_n; + uint64_t counters[n]; - stats_n = priv_ethtool_get_stats_n(priv); + stats_n = mlx5_ethtool_get_stats_n(dev); if (stats_n < 0) return; if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - priv_xstats_reset(priv); + mlx5_xstats_init(dev); + if (mlx5_read_dev_counters(dev, counters) < 0) + return; + for (i = 0; i != n; ++i) + xstats_ctrl->base[i] = counters[i]; } /** diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 3ce93910d..07226b864 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -21,12 +21,13 @@ * Pointer to Ethernet device structure. */ static void -priv_txq_stop(struct priv *priv) +mlx5_txq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->txqs_n; ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); } /** @@ -39,8 +40,9 @@ priv_txq_stop(struct priv *priv) * 0 on success, errno on error. 
*/ static int -priv_txq_start(struct priv *priv) +mlx5_txq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; int ret = 0; @@ -48,28 +50,28 @@ priv_txq_start(struct priv *priv) for (i = 0; i != priv->txqs_n; ++i) { unsigned int idx = 0; struct mlx5_mr *mr; - struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i); + struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); if (!txq_ctrl) continue; LIST_FOREACH(mr, &priv->mr, next) { - priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++); + mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mr->mp, idx++); if (idx == MLX5_PMD_TX_MP_CACHE) break; } txq_alloc_elts(txq_ctrl); - txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i); + txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i); if (!txq_ctrl->ibv) { ret = ENOMEM; goto error; } } - ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd); + ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd); if (ret) goto error; return ret; error: - priv_txq_stop(priv); + mlx5_txq_stop(dev); return ret; } @@ -80,12 +82,13 @@ priv_txq_start(struct priv *priv) * Pointer to Ethernet device structure. */ static void -priv_rxq_stop(struct priv *priv) +mlx5_rxq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); } /** @@ -98,20 +101,21 @@ priv_rxq_stop(struct priv *priv) * 0 on success, errno on error. 
*/ static int -priv_rxq_start(struct priv *priv) +mlx5_rxq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; int ret = 0; for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i); if (!rxq_ctrl) continue; ret = rxq_alloc_elts(rxq_ctrl); if (ret) goto error; - rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i); + rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i); if (!rxq_ctrl->ibv) { ret = ENOMEM; goto error; @@ -119,7 +123,7 @@ priv_rxq_start(struct priv *priv) } return -ret; error: - priv_rxq_stop(priv); + mlx5_rxq_stop(dev); return -ret; } @@ -142,7 +146,7 @@ mlx5_dev_start(struct rte_eth_dev *dev) int err; dev->data->dev_started = 1; - err = priv_flow_create_drop_queue(priv); + err = mlx5_flow_create_drop_queue(dev); if (err) { ERROR("%p: Drop queue allocation failed: %s", (void *)dev, strerror(err)); @@ -150,46 +154,46 @@ mlx5_dev_start(struct rte_eth_dev *dev) } DEBUG("%p: allocating and configuring hash RX queues", (void *)dev); rte_mempool_walk(mlx5_mp2mr_iter, priv); - err = priv_txq_start(priv); + err = mlx5_txq_start(dev); if (err) { ERROR("%p: TXQ allocation failed: %s", (void *)dev, strerror(err)); goto error; } - err = priv_rxq_start(priv); + err = mlx5_rxq_start(dev); if (err) { ERROR("%p: RXQ allocation failed: %s", (void *)dev, strerror(err)); goto error; } - err = priv_rx_intr_vec_enable(priv); + err = mlx5_rx_intr_vec_enable(dev); if (err) { ERROR("%p: RX interrupt vector creation failed", (void *)priv); goto error; } - priv_xstats_init(priv); + mlx5_xstats_init(dev); /* Update link status and Tx/Rx callbacks for the first time. 
*/ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); INFO("Forcing port %u link to be up", dev->data->port_id); - err = priv_force_link_status_change(priv, ETH_LINK_UP); + err = mlx5_force_link_status_change(dev, ETH_LINK_UP); if (err) { DEBUG("Failed to set port %u link to be up", dev->data->port_id); goto error; } - priv_dev_interrupt_handler_install(priv, dev); + mlx5_dev_interrupt_handler_install(dev); return 0; error: /* Rollback. */ dev->data->dev_started = 0; for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); - priv_flow_delete_drop_queue(priv); + mlx5_mr_release(mr); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); + mlx5_flow_delete_drop_queue(dev); return err; } @@ -214,21 +218,21 @@ mlx5_dev_stop(struct rte_eth_dev *dev) rte_wmb(); usleep(1000 * priv->rxqs_n); DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_rx_intr_vec_disable(priv); - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_rx_intr_vec_disable(dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_delete_drop_queue(priv); + mlx5_mr_release(mr); + mlx5_flow_delete_drop_queue(dev); } /** * Enable traffic flows configured by control plane * - * @param priv + * @param dev * Pointer to Ethernet device private data. * @param dev * Pointer to Ethernet device structure. @@ -237,8 +241,9 @@ mlx5_dev_stop(struct rte_eth_dev *dev) * 0 on success. 
*/ int -priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) +mlx5_traffic_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow_item_eth bcast = { .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", }; @@ -356,40 +361,18 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) /** * Disable traffic flows configured by control plane * - * @param priv - * Pointer to Ethernet device private data. * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success. - */ -int -priv_dev_traffic_disable(struct priv *priv, - struct rte_eth_dev *dev __rte_unused) -{ - priv_flow_flush(priv, &priv->ctrl_flows); - return 0; -} - -/** - * Restart traffic flows configured by control plane - * - * @param priv * Pointer to Ethernet device private data. - * @param dev - * Pointer to Ethernet device structure. * * @return * 0 on success. */ int -priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) +mlx5_traffic_disable(struct rte_eth_dev *dev) { - if (dev->data->dev_started) { - priv_dev_traffic_disable(priv, dev); - priv_dev_traffic_enable(priv, dev); - } + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->ctrl_flows); return 0; } @@ -397,7 +380,7 @@ priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) * Restart traffic flows configured by control plane * * @param dev - * Pointer to Ethernet device structure. + * Pointer to Ethernet device private data. * * @return * 0 on success. 
@@ -405,8 +388,9 @@ priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) int mlx5_traffic_restart(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - - priv_dev_traffic_restart(priv, dev); + if (dev->data->dev_started) { + mlx5_traffic_disable(dev); + mlx5_traffic_enable(dev); + } return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 47ee95990..54ed972d7 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -91,15 +91,16 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Tx offloads. */ uint64_t -mlx5_priv_get_tx_port_offloads(struct priv *priv) +mlx5_get_tx_port_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_VLAN_INSERT); struct mlx5_dev_config *config = &priv->config; @@ -123,8 +124,8 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv) /** * Checks if the per-queue offload configuration is valid. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param offloads * Per-queue offloads configuration. * @@ -132,10 +133,10 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv) * 1 if the configuration is valid, 0 otherwise. */ static int -priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) +mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) { - uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads; - uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv); + uint64_t port_offloads = dev->data->dev_conf.txmode.offloads; + uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev); /* There are no Tx offloads which are per queue. 
*/ if ((offloads & port_supp_offloads) != offloads) @@ -177,13 +178,13 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, * use the old API. */ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) { + !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) { ret = ENOTSUP; ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.txmode.offloads, - mlx5_priv_get_tx_port_offloads(priv)); + mlx5_get_tx_port_offloads(dev)); goto out; } if (desc <= MLX5_TX_COMP_THRESH) { @@ -206,14 +207,14 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, idx, priv->txqs_n); return -EOVERFLOW; } - if (!mlx5_priv_txq_releasable(priv, idx)) { + if (!mlx5_txq_releasable(dev, idx)) { ret = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, idx); goto out; } - mlx5_priv_txq_release(priv, idx); - txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf); + mlx5_txq_release(dev, idx); + txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); if (!txq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); @@ -249,7 +250,7 @@ mlx5_tx_queue_release(void *dpdk_txq) if ((*priv->txqs)[i] == txq) { DEBUG("%p: removing TX queue %p from list", (void *)priv->dev, (void *)txq_ctrl); - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(priv->dev, i); break; } } @@ -260,8 +261,8 @@ mlx5_tx_queue_release(void *dpdk_txq) * Both primary and secondary process do mmap to make UAR address * aligned. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param fd * Verbs file descriptor to map UAR pages. * @@ -269,8 +270,9 @@ mlx5_tx_queue_release(void *dpdk_txq) * 0 on success, errno value on failure. 
*/ int -priv_tx_uar_remap(struct priv *priv, int fd) +mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) { + struct priv *priv = dev->data->dev_private; unsigned int i, j; uintptr_t pages[priv->txqs_n]; unsigned int pages_n = 0; @@ -356,8 +358,8 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) /** * Create the Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * @@ -365,8 +367,9 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) * The Verbs object initialised if it can be created. */ struct mlx5_txq_ibv * -mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) +mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq); @@ -383,7 +386,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) struct mlx5dv_cq cq_info; struct mlx5dv_obj obj; const int desc = 1 << txq_data->elts_n; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev); int ret = 0; assert(txq_data); @@ -517,7 +520,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) ERROR("Failed to retrieve UAR info, invalid libmlx5.so version"); goto error; } - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev, (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; @@ -534,8 +537,8 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) /** * Get an Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* @param idx * Queue index in DPDK Rx queue array * @@ -543,8 +546,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) * The Verbs object if it exists. */ struct mlx5_txq_ibv * -mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) +mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq_ctrl; if (idx >= priv->txqs_n) @@ -554,7 +558,7 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); if (txq_ctrl->ibv) { rte_atomic32_inc(&txq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev, (void *)txq_ctrl->ibv, rte_atomic32_read(&txq_ctrl->ibv->refcnt)); } @@ -564,8 +568,6 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Tx verbs queue object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. * @@ -573,11 +575,10 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) * 0 on success, errno on failure. */ int -mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused, - struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) { assert(txq_ibv); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, + DEBUG("Verbs Tx queue %p: refcnt %d", (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) { claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp)); @@ -592,14 +593,11 @@ mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused, /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. 
*/ int -mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, - struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv) { assert(txq_ibv); return (rte_atomic32_read(&txq_ibv->refcnt) == 1); @@ -608,20 +606,21 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused, /** * Verify the Verbs Tx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. */ int -mlx5_priv_txq_ibv_verify(struct priv *priv) +mlx5_txq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_txq_ibv *txq_ibv; LIST_FOREACH(txq_ibv, &priv->txqsibv, next) { - DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv, + DEBUG("%p: Verbs Tx queue %p still referenced", (void *)dev, (void *)txq_ibv); ++ret; } @@ -645,7 +644,8 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) unsigned int txq_inline; unsigned int txqs_inline; unsigned int inline_max_packet_sz; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = + mlx5_select_tx_function(txq_ctrl->priv->dev); int is_empw_func = is_empw_burst_func(tx_pkt_burst); int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO); @@ -731,8 +731,8 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) /** * Create a DPDK Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @param desc @@ -746,10 +746,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * A DPDK queue object on success. 
*/ struct mlx5_txq_ctrl * -mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_txconf *conf) +mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *tmpl; tmpl = rte_calloc_socket("TXQ", 1, @@ -773,7 +773,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1); tmpl->txq.stats.idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); return tmpl; @@ -782,8 +782,8 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, /** * Get a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -791,8 +791,9 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, * A pointer to the queue if it exists. 
*/ struct mlx5_txq_ctrl * -mlx5_priv_txq_get(struct priv *priv, uint16_t idx) +mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *ctrl = NULL; if ((*priv->txqs)[idx]) { @@ -800,15 +801,15 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) txq); unsigned int i; - mlx5_priv_txq_ibv_get(priv, idx); + mlx5_txq_ibv_get(dev, idx); for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { if (ctrl->txq.mp2mr[i]) claim_nonzero - (priv_mr_get(priv, + (mlx5_mr_get(dev, ctrl->txq.mp2mr[i]->mp)); } rte_atomic32_inc(&ctrl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)ctrl, rte_atomic32_read(&ctrl->refcnt)); } return ctrl; @@ -817,8 +818,8 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) /** * Release a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -826,8 +827,9 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) * 0 on success, errno on failure. 
*/ int -mlx5_priv_txq_release(struct priv *priv, uint16_t idx) +mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; unsigned int i; struct mlx5_txq_ctrl *txq; size_t page_size = sysconf(_SC_PAGESIZE); @@ -835,18 +837,18 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) if (!(*priv->txqs)[idx]) return 0; txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, + DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)txq, rte_atomic32_read(&txq->refcnt)); if (txq->ibv) { int ret; - ret = mlx5_priv_txq_ibv_release(priv, txq->ibv); + ret = mlx5_txq_ibv_release(txq->ibv); if (!ret) txq->ibv = NULL; } for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { if (txq->txq.mp2mr[i]) { - priv_mr_release(priv, txq->txq.mp2mr[i]); + mlx5_mr_release(txq->txq.mp2mr[i]); txq->txq.mp2mr[i] = NULL; } } @@ -866,8 +868,8 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -875,8 +877,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) * 1 if the queue can be released. */ int -mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) +mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; if (!(*priv->txqs)[idx]) @@ -888,20 +891,21 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Tx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * The number of object not released. 
*/ int -mlx5_priv_txq_verify(struct priv *priv) +mlx5_txq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; int ret = 0; LIST_FOREACH(txq, &priv->txqsctrl, next) { - DEBUG("%p: Tx Queue %p still referenced", (void *)priv, + DEBUG("%p: Tx Queue %p still referenced", (void *)dev, (void *)txq); ++ret; } diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 184ae2f4e..3df962a90 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -79,25 +79,26 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) ++priv->vlan_filter_n; } if (dev->data->dev_started) - priv_dev_traffic_restart(priv, dev); + mlx5_traffic_restart(dev); out: return ret; } /** - * Set/reset VLAN stripping for a specific queue. + * Callback to set/reset VLAN stripping for a specific queue. * - * @param priv - * Pointer to private structure. - * @param idx + * @param dev + * Pointer to Ethernet device structure. + * @param queue * RX queue index. * @param on * Enable/disable VLAN stripping. 
*/ -static void -priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) +void +mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) { - struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); struct ibv_wq_attr mod; @@ -106,8 +107,18 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) 0; int err; + /* Validate hw support */ + if (!priv->config.hw_vlan_strip) { + ERROR("VLAN stripping is not supported"); + return; + } + /* Validate queue number */ + if (queue >= priv->rxqs_n) { + ERROR("VLAN stripping, invalid queue number %d", queue); + return; + } DEBUG("set VLAN offloads 0x%x for port %d queue %d", - vlan_offloads, rxq->port_id, idx); + vlan_offloads, rxq->port_id, queue); if (!rxq_ctrl->ibv) { /* Update related bits in RX queue. */ rxq->vlan_strip = !!on; @@ -121,7 +132,7 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); if (err) { ERROR("%p: failed to modified stripping mode: %s", - (void *)priv, strerror(err)); + (void *)dev, strerror(err)); return; } /* Update related bits in RX queue. */ @@ -129,34 +140,6 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) } /** - * Callback to set/reset VLAN stripping for a specific queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param queue - * RX queue index. - * @param on - * Enable/disable VLAN stripping. 
- */ -void -mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) -{ - struct priv *priv = dev->data->dev_private; - - /* Validate hw support */ - if (!priv->config.hw_vlan_strip) { - ERROR("VLAN stripping is not supported"); - return; - } - /* Validate queue number */ - if (queue >= priv->rxqs_n) { - ERROR("VLAN stripping, invalid queue number %d", queue); - return; - } - priv_vlan_strip_queue_set(priv, queue, on); -} - -/** * Callback to set/reset VLAN offloads for a port. * * @param dev @@ -180,7 +163,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) } /* Run on every RX queue and set/reset VLAN stripping. */ for (i = 0; (i != priv->rxqs_n); i++) - priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); + mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip); } return 0; } -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 09/10] net/mlx5: change non failing function return values 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (7 preceding siblings ...) 2018-03-05 12:21 ` [PATCH v3 08/10] net/mlx5: prefix all function with mlx5 Nelio Laranjeiro @ 2018-03-05 12:21 ` Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 10/10] net/mlx5: standardize on negative errno values Nelio Laranjeiro 2018-03-18 6:33 ` [PATCH v3 00/10] net/mlx5: clean driver Shahaf Shuler 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:21 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh These functions return int although they are not supposed to fail, resulting in unnecessary checks in their callers. Some are returning error where is should be a boolean. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.h | 4 ++-- drivers/net/mlx5/mlx5_mr.c | 4 ++-- drivers/net/mlx5/mlx5_rxq.c | 25 ++++++++++--------------- drivers/net/mlx5/mlx5_socket.c | 6 +----- drivers/net/mlx5/mlx5_trigger.c | 6 +----- drivers/net/mlx5/mlx5_txq.c | 17 ++++++----------- 6 files changed, 22 insertions(+), 40 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 2cb463b62..86310404a 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -242,7 +242,7 @@ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); int mlx5_dev_start(struct rte_eth_dev *dev); void mlx5_dev_stop(struct rte_eth_dev *dev); int mlx5_traffic_enable(struct rte_eth_dev *dev); -int mlx5_traffic_disable(struct rte_eth_dev *dev); +void mlx5_traffic_disable(struct rte_eth_dev *dev); int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ @@ -287,7 +287,7 @@ void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev); /* mlx5_socket.c */ int mlx5_socket_init(struct rte_eth_dev *priv); -int mlx5_socket_uninit(struct rte_eth_dev *priv); +void 
mlx5_socket_uninit(struct rte_eth_dev *priv); void mlx5_socket_handle(struct rte_eth_dev *priv); int mlx5_socket_connect(struct rte_eth_dev *priv); diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index fe60dd132..5c4e68736 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -307,7 +307,7 @@ mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp) * Pointer to memory region to release. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_mr_release(struct mlx5_mr *mr) @@ -321,7 +321,7 @@ mlx5_mr_release(struct mlx5_mr *mr) rte_free(mr); return 0; } - return EBUSY; + return 1; } /** diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index a3b08a1a3..8e7693df2 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -840,7 +840,7 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) * Verbs Rx queue object. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) @@ -867,7 +867,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) rte_free(rxq_ibv); return 0; } - return EBUSY; + return 1; } /** @@ -1074,7 +1074,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) * TX queue index. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) @@ -1086,13 +1086,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) return 0; rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); assert(rxq_ctrl->priv); - if (rxq_ctrl->ibv) { - int ret; - - ret = mlx5_rxq_ibv_release(rxq_ctrl->ibv); - if (!ret) - rxq_ctrl->ibv = NULL; - } + if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv)) + rxq_ctrl->ibv = NULL; DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev, (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { @@ -1101,7 +1096,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) (*priv->rxqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** @@ -1261,7 +1256,7 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[], * Indirection table to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, @@ -1281,7 +1276,7 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, rte_free(ind_tbl); return 0; } - return EBUSY; + return 1; } /** @@ -1439,7 +1434,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, * Pointer to Hash Rx queue to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) @@ -1454,7 +1449,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) return 0; } claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table)); - return EBUSY; + return 1; } /** diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index b8f610df3..8db25cff1 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -82,11 +82,8 @@ mlx5_socket_init(struct rte_eth_dev *dev) * * @param[in] dev * Pointer to Ethernet device. - * - * @return - * 0 on success, errno value on failure. */ -int +void mlx5_socket_uninit(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; @@ -95,7 +92,6 @@ mlx5_socket_uninit(struct rte_eth_dev *dev) claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; claim_zero(remove(path)); - return 0; } /** diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 07226b864..a3ccebdd6 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -363,17 +363,13 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device private data. - * - * @return - * 0 on success. */ -int +void mlx5_traffic_disable(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->ctrl_flows); - return 0; } /** diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 54ed972d7..a5e2c7fa7 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -572,7 +572,7 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) * Verbs Tx queue object. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) @@ -587,7 +587,7 @@ mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) rte_free(txq_ibv); return 0; } - return EBUSY; + return 1; } /** @@ -824,7 +824,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) * TX queue index. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. */ int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) @@ -839,13 +839,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev, (void *)txq, rte_atomic32_read(&txq->refcnt)); - if (txq->ibv) { - int ret; - - ret = mlx5_txq_ibv_release(txq->ibv); - if (!ret) - txq->ibv = NULL; - } + if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv)) + txq->ibv = NULL; for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { if (txq->txq.mp2mr[i]) { mlx5_mr_release(txq->txq.mp2mr[i]); @@ -862,7 +857,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) (*priv->txqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH v3 10/10] net/mlx5: standardize on negative errno values 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (8 preceding siblings ...) 2018-03-05 12:21 ` [PATCH v3 09/10] net/mlx5: change non failing function return values Nelio Laranjeiro @ 2018-03-05 12:21 ` Nelio Laranjeiro 2018-03-18 6:33 ` [PATCH v3 00/10] net/mlx5: clean driver Shahaf Shuler 10 siblings, 0 replies; 30+ messages in thread From: Nelio Laranjeiro @ 2018-03-05 12:21 UTC (permalink / raw) To: dev; +Cc: Adrien Mazarguil, Yongseok Koh Set rte_errno systematically as well. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> --- drivers/net/mlx5/mlx5.c | 88 ++++++----- drivers/net/mlx5/mlx5_ethdev.c | 231 ++++++++++++++++------------- drivers/net/mlx5/mlx5_flow.c | 317 +++++++++++++++++++++++----------------- drivers/net/mlx5/mlx5_mac.c | 33 +++-- drivers/net/mlx5/mlx5_mr.c | 15 +- drivers/net/mlx5/mlx5_rss.c | 50 ++++--- drivers/net/mlx5/mlx5_rxmode.c | 28 +++- drivers/net/mlx5/mlx5_rxq.c | 142 ++++++++++-------- drivers/net/mlx5/mlx5_socket.c | 82 +++++++---- drivers/net/mlx5/mlx5_stats.c | 53 +++++-- drivers/net/mlx5/mlx5_trigger.c | 89 ++++++----- drivers/net/mlx5/mlx5_txq.c | 54 ++++--- drivers/net/mlx5/mlx5_vlan.c | 24 +-- 13 files changed, 719 insertions(+), 487 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index b6211e9c1..10da7a283 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -108,7 +108,7 @@ mlx5_getenv_int(const char *name) * A pointer to the callback data. * * @return - * a pointer to the allocate space. + * Allocated buffer, NULL otherwise and rte_errno is set. 
*/ static void * mlx5_alloc_verbs_buf(size_t size, void *data) @@ -130,6 +130,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data) } assert(data != NULL); ret = rte_malloc_socket(__func__, size, alignment, socket); + if (!ret && size) + rte_errno = ENOMEM; DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret); return ret; } @@ -365,7 +367,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr) * User data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_args_check(const char *key, const char *val, void *opaque) @@ -376,8 +378,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque) errno = 0; tmp = strtoul(val, NULL, 0); if (errno) { + rte_errno = errno; WARN("%s: \"%s\" is not a valid integer", key, val); - return errno; + return -rte_errno; } if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { config->cqe_comp = !!tmp; @@ -397,7 +400,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->rx_vec_en = !!tmp; } else { WARN("%s: unknown parameter", key); - return -EINVAL; + rte_errno = EINVAL; + return -rte_errno; } return 0; } @@ -411,7 +415,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) * Device arguments structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) @@ -442,9 +446,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) if (rte_kvargs_count(kvlist, params[i])) { ret = rte_kvargs_process(kvlist, params[i], mlx5_args_check, config); - if (ret != 0) { + if (ret) { + rte_errno = EINVAL; rte_kvargs_free(kvlist); - return ret; + return -rte_errno; } } } @@ -470,7 +475,7 @@ static void *uar_base; * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. 
+ * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_uar_init_primary(struct rte_eth_dev *dev) @@ -479,7 +484,6 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev) void *addr = (void *)0; int i; const struct rte_mem_config *mcfg; - int ret; if (uar_base) { /* UAR address space mapped. */ priv->uar_base = uar_base; @@ -501,8 +505,8 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev) if (addr == MAP_FAILED) { ERROR("Failed to reserve UAR address space, please adjust " "MLX5_UAR_SIZE or try --base-virtaddr"); - ret = ENOMEM; - return ret; + rte_errno = ENOMEM; + return -rte_errno; } /* Accept either same addr or a new addr returned from mmap if target * range occupied. @@ -521,14 +525,13 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev) * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_uar_init_secondary(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; void *addr; - int ret; assert(priv->uar_base); if (uar_base) { /* already reserved. */ @@ -541,15 +544,15 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev) if (addr == MAP_FAILED) { ERROR("UAR mmap failed: %p size: %llu", priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + rte_errno = ENXIO; + return -rte_errno; } if (priv->uar_base != addr) { ERROR("UAR address %p size %llu occupied, please adjust " "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr", priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + rte_errno = ENXIO; + return -rte_errno; } uar_base = addr; /* process local, don't reserve again */ INFO("Reserved UAR address space: %p", addr); @@ -568,13 +571,13 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev) * PCI device information. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - struct ibv_device **list; + struct ibv_device **list = NULL; struct ibv_device *ibv_dev; int err = 0; struct ibv_context *attr_ctx = NULL; @@ -594,7 +597,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, idx = mlx5_dev_idx(&pci_dev->addr); if (idx == -1) { ERROR("this driver cannot support any more adapters"); - return -ENOMEM; + err = ENOMEM; + goto error; } DEBUG("using driver device index %d", idx); /* Save PCI address. */ @@ -602,9 +606,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, list = mlx5_glue->get_device_list(&i); if (list == NULL) { assert(errno); + err = errno; if (errno == ENOSYS) ERROR("cannot list devices, is ib_uverbs loaded?"); - return -errno; + goto error; } assert(i >= 0); /* @@ -626,7 +631,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, INFO("PCI information matches, using device \"%s\"", list[i]->name); attr_ctx = mlx5_glue->open_device(list[i]); - err = errno; + rte_errno = errno; + err = rte_errno; break; } if (attr_ctx == NULL) { @@ -634,13 +640,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, switch (err) { case 0: ERROR("cannot access device, is mlx5_ib loaded?"); - return -ENODEV; + err = ENODEV; + goto error; case EINVAL: ERROR("cannot use device, are drivers up to date?"); - return -EINVAL; + goto error; } - assert(err > 0); - return -err; } ibv_dev = list[i]; DEBUG("device opened"); @@ -680,8 +685,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, #else WARN("Tunnel offloading disabled due to old OFED/rdma-core version"); #endif - if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) + if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) { + err = errno; goto error; + } INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt); for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { char name[RTE_ETH_NAME_MAX_LEN]; 
@@ -718,22 +725,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, eth_dev = rte_eth_dev_attach_secondary(name); if (eth_dev == NULL) { ERROR("can not attach rte ethdev"); - err = ENOMEM; + rte_errno = ENOMEM; + err = rte_errno; goto error; } eth_dev->device = &pci_dev->device; eth_dev->dev_ops = &mlx5_dev_sec_ops; err = mlx5_uar_init_secondary(eth_dev); - if (err < 0) { - err = -err; + if (err) goto error; - } /* Receive command fd from primary process */ err = mlx5_socket_connect(eth_dev); - if (err < 0) { - err = -err; + if (err) goto error; - } /* Remap UAR for Tx queues. */ err = mlx5_tx_uar_remap(eth_dev, err); if (err) @@ -804,6 +808,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) { ERROR("ibv_query_device_ex() failed"); + err = errno; goto port_error; } config.hw_csum = !!(device_attr_ex.device_cap_flags_ex & @@ -899,7 +904,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } #endif /* Get actual MTU if possible. */ - mlx5_get_mtu(eth_dev, &priv->mtu); + err = mlx5_get_mtu(eth_dev, &priv->mtu); + if (err) + goto port_error; DEBUG("port %u MTU is %u", priv->port, priv->mtu); /* * Initialize burst functions to prevent crashes before link-up. 
@@ -943,16 +950,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, */ /* no port found, complain */ if (!mlx5_dev[idx].ports) { - err = ENODEV; - goto error; + rte_errno = ENODEV; + err = rte_errno; } error: if (attr_ctx) claim_zero(mlx5_glue->close_device(attr_ctx)); if (list) mlx5_glue->free_device_list(list); - assert(err >= 0); - return -err; + if (err) { + rte_errno = err; + return -rte_errno; + } + return 0; } static const struct rte_pci_id mlx5_pci_id_map[] = { diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 1fde3d842..d7e85577f 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -101,7 +101,7 @@ struct ethtool_link_settings { * Interface name output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) @@ -117,8 +117,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) MKSTR(path, "%s/device/net", priv->ibdev_path); dir = opendir(path); - if (dir == NULL) - return -1; + if (dir == NULL) { + rte_errno = errno; + return -rte_errno; + } } while ((dent = readdir(dir)) != NULL) { char *name = dent->d_name; @@ -168,8 +170,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) snprintf(match, sizeof(match), "%s", name); } closedir(dir); - if (match[0] == '\0') - return -1; + if (match[0] == '\0') { + rte_errno = ENOENT; + return -rte_errno; + } strncpy(*ifname, match, sizeof(*ifname)); return 0; } @@ -185,20 +189,31 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) * Interface request structure output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) { int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); - int ret = -1; + int ret = 0; - if (sock == -1) - return ret; - if (mlx5_get_ifname(dev, &ifr->ifr_name) == 0) - ret = ioctl(sock, req, ifr); + if (sock == -1) { + rte_errno = errno; + return -rte_errno; + } + ret = mlx5_get_ifname(dev, &ifr->ifr_name); + if (ret) + goto error; + ret = ioctl(sock, req, ifr); + if (ret == -1) { + rte_errno = errno; + goto error; + } close(sock); - return ret; + return 0; +error: + close(sock); + return -rte_errno; } /** @@ -210,7 +225,7 @@ mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) * MTU value output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) @@ -233,7 +248,7 @@ mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) * MTU value to set. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) @@ -254,7 +269,7 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * Bitmask for flags to modify. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) @@ -276,7 +291,7 @@ mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) * Pointer to Ethernet device structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_configure(struct rte_eth_dev *dev) @@ -295,31 +310,36 @@ mlx5_dev_configure(struct rte_eth_dev *dev) (mlx5_get_rx_port_offloads() | mlx5_get_rx_queue_offloads(dev)); uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; + int ret = 0; if ((tx_offloads & supp_tx_offloads) != tx_offloads) { ERROR("Some Tx offloads are not supported " "requested 0x%" PRIx64 " supported 0x%" PRIx64, tx_offloads, supp_tx_offloads); - return ENOTSUP; + rte_errno = ENOTSUP; + return -rte_errno; } if ((rx_offloads & supp_rx_offloads) != rx_offloads) { ERROR("Some Rx offloads are not supported " "requested 0x%" PRIx64 " supported 0x%" PRIx64, rx_offloads, supp_rx_offloads); - return ENOTSUP; + rte_errno = ENOTSUP; + return -rte_errno; } if (use_app_rss_key && (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != rss_hash_default_key_len)) { /* MLX5 RSS only support 40bytes key. */ - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_hash_default_key_len, 0); if (!priv->rss_conf.rss_key) { ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n); - return ENOMEM; + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, use_app_rss_key ? @@ -337,7 +357,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev) } if (rxqs_n > priv->config.ind_table_max_size) { ERROR("cannot handle this many RX queues (%u)", rxqs_n); - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } if (rxqs_n == priv->rxqs_n) return 0; @@ -350,8 +371,9 @@ mlx5_dev_configure(struct rte_eth_dev *dev) reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? priv->config.ind_table_max_size : rxqs_n)); - if (mlx5_rss_reta_index_resize(dev, reta_idx_n)) - return ENOMEM; + ret = mlx5_rss_reta_index_resize(dev, reta_idx_n); + if (ret) + return ret; /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread * uniformly. 
*/ @@ -361,7 +383,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev) j = 0; } return 0; - } /** @@ -452,7 +473,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, -1 on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) @@ -464,19 +485,22 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) struct ifreq ifr; struct rte_eth_link dev_link; int link_speed = 0; + int ret; - if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&edata; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", - strerror(errno)); - return -1; + strerror(rte_errno)); + return ret; } link_speed = ethtool_cmd_speed(&edata); if (link_speed == -1) @@ -506,7 +530,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) return 0; } /* Link status is still the same. */ - return -1; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -516,7 +541,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, -1 on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) @@ -526,19 +551,22 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) struct ifreq ifr; struct rte_eth_link dev_link; uint64_t sc; + int ret; - if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&gcmd; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + strerror(rte_errno)); + return ret; } gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords; @@ -549,10 +577,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) *ecmd = gcmd; ifr.ifr_data = (void *)ecmd; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + strerror(rte_errno)); + return ret; } dev_link.link_speed = ecmd->speed; sc = ecmd->link_mode_masks[0] | @@ -602,7 +631,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev) return 0; } /* Link status is still the same. 
*/ - return -1; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -615,18 +645,21 @@ static void mlx5_link_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - int err; + int ret; dev->tx_pkt_burst = mlx5_select_tx_function(dev); dev->rx_pkt_burst = mlx5_select_rx_function(dev); - err = mlx5_traffic_enable(dev); - if (err) + ret = mlx5_traffic_enable(dev); + if (ret) { ERROR("%p: error occurred while configuring control flows: %s", - (void *)dev, strerror(err)); - err = mlx5_flow_start(dev, &priv->flows); - if (err) + (void *)dev, strerror(rte_errno)); + return; + } + ret = mlx5_flow_start(dev, &priv->flows); + if (ret) { ERROR("%p: error occurred while configuring flows: %s", - (void *)dev, strerror(err)); + (void *)dev, strerror(rte_errno)); + } } /** @@ -656,7 +689,7 @@ mlx5_link_stop(struct rte_eth_dev *dev) * Link desired status. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) @@ -670,7 +703,8 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) try++; sleep(1); } - return -EAGAIN; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -682,7 +716,7 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status) * Wait for request completion (ignored). * * @return - * 0 on success, -1 on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) @@ -699,10 +733,12 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) ret = mlx5_link_update_unlocked_gset(dev); else ret = mlx5_link_update_unlocked_gs(dev); + if (ret) + return ret; /* If lsc interrupt is disabled, should always be ready for traffic. 
*/ if (!dev->data->dev_conf.intr_conf.lsc) { mlx5_link_start(dev); - return ret; + return 0; } /* Re-select burst callbacks only if link status has been changed. */ if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { @@ -711,7 +747,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) else mlx5_link_stop(dev); } - return ret; + return 0; } /** @@ -723,36 +759,32 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) * New MTU. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct priv *priv = dev->data->dev_private; - uint16_t kern_mtu; - int ret = 0; + uint16_t kern_mtu = 0; + int ret; ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; /* Set kernel interface MTU first. */ ret = mlx5_set_mtu(dev, mtu); if (ret) - goto out; + return ret; ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; if (kern_mtu == mtu) { priv->mtu = mtu; DEBUG("adapter port %u MTU set to %u", priv->port, mtu); + return 0; } - return 0; -out: - ret = errno; - WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, - strerror(ret)); - assert(ret >= 0); - return -ret; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -764,7 +796,7 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * Flow control output buffer. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) @@ -776,11 +808,11 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { - ret = errno; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s", - strerror(ret)); - goto out; + strerror(rte_errno)); + return ret; } fc_conf->autoneg = ethpause.autoneg; if (ethpause.rx_pause && ethpause.tx_pause) @@ -791,10 +823,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) fc_conf->mode = RTE_FC_TX_PAUSE; else fc_conf->mode = RTE_FC_NONE; - ret = 0; -out: - assert(ret >= 0); - return -ret; + return 0; } /** @@ -806,7 +835,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Flow control parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) @@ -830,17 +859,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) { - ret = errno; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" " failed: %s", - strerror(ret)); - goto out; + strerror(rte_errno)); + return ret; } - ret = 0; -out: - assert(ret >= 0); - return -ret; + return 0; } /** @@ -852,7 +878,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * PCI bus address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, @@ -863,8 +889,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, MKSTR(path, "%s/device/uevent", device->ibdev_path); file = fopen(path, "rb"); - if (file == NULL) - return -1; + if (file == NULL) { + rte_errno = errno; + return -rte_errno; + } while (fgets(line, sizeof(line), file) == line) { size_t len = strlen(line); int ret; @@ -900,15 +928,19 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, * Pointer to Ethernet device. * * @return - * Zero if the callback process can be called immediately. + * Zero if the callback process can be called immediately, negative errno + * value otherwise and rte_errno is set. */ static int mlx5_link_status_update(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct rte_eth_link *link = &dev->data->dev_link; + int ret; - mlx5_link_update(dev, 0); + ret = mlx5_link_update(dev, 0); + if (ret) + return ret; if (((link->link_speed == 0) && link->link_status) || ((link->link_speed != 0) && !link->link_status)) { /* @@ -1062,12 +1094,13 @@ void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - int rc, flags; + int ret; + int flags; assert(priv->ctx->async_fd > 0); flags = fcntl(priv->ctx->async_fd, F_GETFL); - rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); - if (rc < 0) { + ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (ret) { INFO("failed to change file descriptor async event queue"); dev->data->dev_conf.intr_conf.lsc = 0; dev->data->dev_conf.intr_conf.rmv = 0; @@ -1079,8 +1112,10 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - rc = mlx5_socket_init(dev); - if (!rc && priv->primary_socket) { + ret = mlx5_socket_init(dev); + if (ret) + ERROR("cannot initialise socket: %s", strerror(rte_errno)); + else if 
(priv->primary_socket) { priv->intr_handle_socket.fd = priv->primary_socket; priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; rte_intr_callback_register(&priv->intr_handle_socket, @@ -1095,7 +1130,7 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_link_down(struct rte_eth_dev *dev) @@ -1110,7 +1145,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_set_link_up(struct rte_eth_dev *dev) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 6b53b3ea5..1435516dc 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -247,7 +247,8 @@ struct mlx5_flow_items { * Internal structure to store the conversion. * * @return - * 0 on success, negative value otherwise. + * 0 on success, a negative errno value otherwise and rte_errno is + * set. */ int (*convert)(const struct rte_flow_item *item, const void *default_mask, @@ -460,45 +461,52 @@ struct ibv_spec_header { * Bit-Mask size in bytes. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_item_validate(const struct rte_flow_item *item, const uint8_t *mask, unsigned int size) { - int ret = 0; - - if (!item->spec && (item->mask || item->last)) - return -1; + if (!item->spec && (item->mask || item->last)) { + rte_errno = EINVAL; + return -rte_errno; + } if (item->spec && !item->mask) { unsigned int i; const uint8_t *spec = item->spec; for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + if ((spec[i] | mask[i]) != mask[i]) { + rte_errno = EINVAL; + return -rte_errno; + } } if (item->last && !item->mask) { unsigned int i; const uint8_t *spec = item->last; for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + if ((spec[i] | mask[i]) != mask[i]) { + rte_errno = EINVAL; + return -rte_errno; + } } if (item->mask) { unsigned int i; const uint8_t *spec = item->spec; for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + if ((spec[i] | mask[i]) != mask[i]) { + rte_errno = EINVAL; + return -rte_errno; + } } if (item->spec && item->last) { uint8_t spec[size]; uint8_t last[size]; const uint8_t *apply = mask; unsigned int i; + int ret; if (item->mask) apply = item->mask; @@ -507,8 +515,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, last[i] = ((const uint8_t *)item->last)[i] & apply[i]; } ret = memcmp(spec, last, size); + if (ret != 0) { + rte_errno = EINVAL; + return -rte_errno; + } } - return ret; + return 0; } /** @@ -521,7 +533,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * User RSS configuration to save. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser, @@ -533,10 +545,14 @@ mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser, * device default RSS configuration. 
*/ if (rss_conf) { - if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) - return EINVAL; - if (rss_conf->rss_key_len != 40) - return EINVAL; + if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { + rte_errno = EINVAL; + return -rte_errno; + } + if (rss_conf->rss_key_len != 40) { + rte_errno = EINVAL; + return -rte_errno; + } if (rss_conf->rss_key_len && rss_conf->rss_key) { parser->rss_conf.rss_key_len = rss_conf->rss_key_len; memcpy(parser->rss_key, rss_conf->rss_key, @@ -616,14 +632,17 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser) { struct priv *priv = dev->data->dev_private; + int ret; /* * Add default RSS configuration necessary for Verbs to create QP even * if no RSS is necessary. */ - mlx5_flow_convert_rss_conf(parser, - (const struct rte_eth_rss_conf *) - &priv->rss_conf); + ret = mlx5_flow_convert_rss_conf(parser, + (const struct rte_eth_rss_conf *) + &priv->rss_conf); + if (ret) + return ret; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { continue; @@ -772,6 +791,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], { const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; + int ret = 0; /* Initialise the offsets to start after verbs attribute. 
*/ for (i = 0; i != hash_rxq_init_n; ++i) @@ -779,7 +799,6 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { const struct mlx5_flow_items *token = NULL; unsigned int n; - int err; if (items->type == RTE_FLOW_ITEM_TYPE_VOID) continue; @@ -795,10 +814,10 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], if (!token) goto exit_item_not_supported; cur_item = token; - err = mlx5_flow_item_validate(items, + ret = mlx5_flow_item_validate(items, (const uint8_t *)cur_item->mask, cur_item->mask_sz); - if (err) + if (ret) goto exit_item_not_supported; if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) { if (parser->inner) { @@ -835,9 +854,8 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], } return 0; exit_item_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); - return -rte_errno; + return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM, + items, "item not supported"); } /** @@ -851,7 +869,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], * Perform verbose error reporting if not NULL. * * @return - * A verbs flow attribute on success, NULL otherwise. + * A verbs flow attribute on success, NULL otherwise and rte_errno is set. 
*/ static struct ibv_flow_attr * mlx5_flow_convert_allocate(unsigned int priority, @@ -1054,7 +1072,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, parser->queue[HASH_RXQ_ETH].ibv_attr = mlx5_flow_convert_allocate(priority, offset, error); if (!parser->queue[HASH_RXQ_ETH].ibv_attr) - return ENOMEM; + goto exit_enomem; parser->queue[HASH_RXQ_ETH].offset = sizeof(struct ibv_flow_attr); } else { @@ -1089,7 +1107,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, cur_item->mask), parser); if (ret) { - rte_flow_error_set(error, ret, + rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ITEM, items, "item not supported"); goto exit_free; @@ -1131,13 +1149,13 @@ mlx5_flow_convert(struct rte_eth_dev *dev, parser->queue[i].ibv_attr = NULL; } } - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate verbs spec attributes."); - return ret; + return -rte_errno; exit_count_error: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create counter."); - return rte_errno; + return -rte_errno; } /** @@ -1183,6 +1201,9 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_eth(const struct rte_flow_item *item, @@ -1232,6 +1253,9 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_vlan(const struct rte_flow_item *item, @@ -1272,6 +1296,9 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. 
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_ipv4(const struct rte_flow_item *item, @@ -1324,6 +1351,9 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_ipv6(const struct rte_flow_item *item, @@ -1396,6 +1426,9 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_udp(const struct rte_flow_item *item, @@ -1442,6 +1475,9 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_tcp(const struct rte_flow_item *item, @@ -1488,6 +1524,9 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_vxlan(const struct rte_flow_item *item, @@ -1527,8 +1566,10 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, * before will also match this rule. * To avoid such situation, VNI 0 is currently refused. */ - if (!vxlan.val.tunnel_id) - return EINVAL; + if (!vxlan.val.tunnel_id) { + rte_errno = EINVAL; + return -rte_errno; + } mlx5_flow_create_copy(parser, &vxlan, size); return 0; } @@ -1540,6 +1581,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, * Internal parser structure. 
* @param mark_id * Mark identifier. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) @@ -1565,7 +1609,7 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) * Pointer to MLX5 flow parser structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, @@ -1583,8 +1627,10 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, init_attr.counter_set_id = 0; parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr); - if (!parser->cs) - return EINVAL; + if (!parser->cs) { + rte_errno = EINVAL; + return -rte_errno; + } counter.counter_set_handle = parser->cs->handle; mlx5_flow_create_copy(parser, &counter, size); #endif @@ -1604,7 +1650,7 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, @@ -1615,7 +1661,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; struct ibv_flow_spec_action_drop *drop; unsigned int size = sizeof(struct ibv_flow_spec_action_drop); - int err = 0; assert(priv->pd); assert(priv->ctx); @@ -1641,7 +1686,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; goto error; } return 0; @@ -1661,7 +1705,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, flow->cs = NULL; parser->cs = NULL; } - return err; + return -rte_errno; } /** @@ -1677,7 +1721,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, @@ -1715,10 +1759,10 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, parser->queues, parser->queues_n); if (!flow->frxq[i].hrxq) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "cannot create hash rxq"); - return ENOMEM; + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "cannot create hash rxq"); } } return 0; @@ -1737,7 +1781,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_create_action_queue(struct rte_eth_dev *dev, @@ -1746,14 +1790,14 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; - int err = 0; + int ret; unsigned int i; assert(priv->pd); assert(priv->ctx); assert(!parser->drop); - err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); - if (err) + ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); + if (ret) goto error; if (parser->count) flow->cs = parser->cs; @@ -1769,7 +1813,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; goto error; } DEBUG("%p type %d QP %p ibv_flow %p", @@ -1785,6 +1828,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, } return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ assert(flow); for (i = 0; i != hash_rxq_init_n; ++i) { if (flow->frxq[i].ibv_flow) { @@ -1802,7 +1846,8 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, flow->cs = NULL; parser->cs = NULL; } - return err; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -1822,7 +1867,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * A flow on success, NULL otherwise. + * A flow on success, NULL otherwise and rte_errno is set. 
*/ static struct rte_flow * mlx5_flow_list_create(struct rte_eth_dev *dev, @@ -1835,10 +1880,10 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; unsigned int i; - int err; + int ret; - err = mlx5_flow_convert(dev, attr, items, actions, error, &parser); - if (err) + ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); + if (ret) goto exit; flow = rte_calloc(__func__, 1, sizeof(*flow) + parser.queues_n * sizeof(uint16_t), @@ -1861,11 +1906,11 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len); /* finalise the flow. */ if (parser.drop) - err = mlx5_flow_create_action_queue_drop(dev, &parser, flow, + ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow, error); else - err = mlx5_flow_create_action_queue(dev, &parser, flow, error); - if (err) + ret = mlx5_flow_create_action_queue(dev, &parser, flow, error); + if (ret) goto exit; TAILQ_INSERT_TAIL(list, flow, next); DEBUG("Flow created %p", (void *)flow); @@ -1893,11 +1938,9 @@ mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - int ret; struct mlx5_flow_parse parser = { .create = 0, }; - ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); - return ret; + return mlx5_flow_convert(dev, attr, items, actions, error, &parser); } /** @@ -2021,7 +2064,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) * Pointer to Ethernet device. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) @@ -2034,11 +2077,13 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0); if (!fdq) { WARN("cannot allocate memory for drop queue"); - goto error; + rte_errno = ENOMEM; + return -rte_errno; } fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); if (!fdq->cq) { WARN("cannot allocate CQ for drop queue"); + rte_errno = errno; goto error; } fdq->wq = mlx5_glue->create_wq @@ -2052,6 +2097,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) }); if (!fdq->wq) { WARN("cannot allocate WQ for drop queue"); + rte_errno = errno; goto error; } fdq->ind_table = mlx5_glue->create_rwq_ind_table @@ -2063,6 +2109,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) }); if (!fdq->ind_table) { WARN("cannot allocate indirection table for drop queue"); + rte_errno = errno; goto error; } fdq->qp = mlx5_glue->create_qp_ex @@ -2085,6 +2132,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) }); if (!fdq->qp) { WARN("cannot allocate QP for drop queue"); + rte_errno = errno; goto error; } priv->flow_drop_queue = fdq; @@ -2101,7 +2149,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) if (fdq) rte_free(fdq); priv->flow_drop_queue = NULL; - return -1; + return -rte_errno; } /** @@ -2200,7 +2248,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) * Pointer to a TAILQ flow list. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) @@ -2220,7 +2268,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } DEBUG("Flow %p applied", (void *)flow); /* Next flow. 
*/ @@ -2247,7 +2295,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } flow_create: flow->frxq[i].ibv_flow = @@ -2257,7 +2305,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) DEBUG("Flow %p cannot be applied", (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } DEBUG("Flow %p applied", (void *)flow); } @@ -2307,7 +2355,7 @@ mlx5_flow_verify(struct rte_eth_dev *dev) * A VLAN flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, @@ -2359,8 +2407,10 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, } local; } action_rss; - if (!priv->reta_idx_n) - return EINVAL; + if (!priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } for (i = 0; i != priv->reta_idx_n; ++i) action_rss.local.queue[i] = (*priv->reta_idx)[i]; action_rss.local.rss_conf = &priv->rss_conf; @@ -2369,7 +2419,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, &error); if (!flow) - return rte_errno; + return -rte_errno; return 0; } @@ -2384,7 +2434,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, * An Ethernet flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow(struct rte_eth_dev *dev, @@ -2437,7 +2487,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, * returned data from the counter. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_query_count(struct ibv_counter_set *cs, @@ -2454,15 +2504,13 @@ mlx5_flow_query_count(struct ibv_counter_set *cs, .out = counters, .outlen = 2 * sizeof(uint64_t), }; - int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); + int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); - if (res) { - rte_flow_error_set(error, -res, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot read counter"); - return -res; - } + if (err) + return rte_flow_error_set(error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot read counter"); query_count->hits_set = 1; query_count->bytes_set = 1; query_count->hits = counters[0] - counter_stats->hits; @@ -2487,20 +2535,22 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, void *data, struct rte_flow_error *error) { - int res = EINVAL; - if (flow->cs) { - res = mlx5_flow_query_count(flow->cs, - &flow->counter_stats, - (struct rte_flow_query_count *)data, - error); + int ret; + + ret = mlx5_flow_query_count(flow->cs, + &flow->counter_stats, + (struct rte_flow_query_count *)data, + error); + if (ret) + return ret; } else { - rte_flow_error_set(error, res, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "no counter found for flow"); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "no counter found for flow"); } - return -res; + return 0; } #endif @@ -2543,7 +2593,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, * Generic flow parameters structure. * * @return - * 0 on success, errno value on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_fdir_filter_convert(struct rte_eth_dev *dev, @@ -2556,7 +2606,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, /* Validate queue number. 
*/ if (fdir_filter->action.rx_queue >= priv->rxqs_n) { ERROR("invalid queue number %d", fdir_filter->action.rx_queue); - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } attributes->attr.ingress = 1; attributes->items[0] = (struct rte_flow_item) { @@ -2578,7 +2629,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, break; default: ERROR("invalid behavior %d", fdir_filter->action.behavior); - return ENOTSUP; + rte_errno = ENOTSUP; + return -rte_errno; } attributes->queue.index = fdir_filter->action.rx_queue; switch (fdir_filter->input.flow_type) { @@ -2712,9 +2764,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; default: - ERROR("invalid flow type%d", - fdir_filter->input.flow_type); - return ENOTSUP; + ERROR("invalid flow type%d", fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; } return 0; } @@ -2728,7 +2780,7 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, * Flow director filter to add. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_fdir_filter_add(struct rte_eth_dev *dev, @@ -2752,11 +2804,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) - return -ret; + return ret; ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) - return -ret; + return ret; flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, attributes.items, attributes.actions, &error); @@ -2764,7 +2816,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, DEBUG("FDIR created %p", (void *)flow); return 0; } - return ENOTSUP; + return -rte_errno; } /** @@ -2776,7 +2828,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, * Filter to be deleted. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_fdir_filter_delete(struct rte_eth_dev *dev, @@ -2797,7 +2849,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) - return -ret; + return ret; ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) @@ -2855,6 +2907,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, /* The flow does not match. */ continue; } + ret = rte_errno; /* Save rte_errno before cleanup. */ if (flow) mlx5_flow_list_destroy(dev, &priv->flows, flow); exit: @@ -2862,7 +2915,8 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, if (parser.queue[i].ibv_attr) rte_free(parser.queue[i].ibv_attr); } - return -ret; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -2874,7 +2928,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev, * Filter to be updated. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_fdir_filter_update(struct rte_eth_dev *dev, @@ -2885,8 +2939,7 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - ret = mlx5_fdir_filter_add(dev, fdir_filter); - return ret; + return mlx5_fdir_filter_add(dev, fdir_filter); } /** @@ -2940,7 +2993,7 @@ mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) * Pointer to operation-specific structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, @@ -2949,7 +3002,6 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, struct priv *priv = dev->data->dev_private; enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode; - int ret = 0; if (filter_op == RTE_ETH_FILTER_NOP) return 0; @@ -2957,18 +3009,16 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { ERROR("%p: flow director mode %d not supported", (void *)dev, fdir_mode); - return EINVAL; + rte_errno = EINVAL; + return -rte_errno; } switch (filter_op) { case RTE_ETH_FILTER_ADD: - ret = mlx5_fdir_filter_add(dev, arg); - break; + return mlx5_fdir_filter_add(dev, arg); case RTE_ETH_FILTER_UPDATE: - ret = mlx5_fdir_filter_update(dev, arg); - break; + return mlx5_fdir_filter_update(dev, arg); case RTE_ETH_FILTER_DELETE: - ret = mlx5_fdir_filter_delete(dev, arg); - break; + return mlx5_fdir_filter_delete(dev, arg); case RTE_ETH_FILTER_FLUSH: mlx5_fdir_filter_flush(dev); break; @@ -2976,12 +3026,11 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, mlx5_fdir_info_get(dev, arg); break; default: - DEBUG("%p: unknown operation %u", (void *)dev, - filter_op); - ret = EINVAL; - break; + DEBUG("%p: unknown operation %u", (void *)dev, filter_op); + rte_errno = EINVAL; + return -rte_errno; } - return ret; + return 0; } /** @@ -2997,7 +3046,7 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, * Pointer to operation-specific structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, @@ -3005,21 +3054,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - int ret = EINVAL; - switch (filter_type) { case RTE_ETH_FILTER_GENERIC: - if (filter_op != RTE_ETH_FILTER_GET) - return -EINVAL; + if (filter_op != RTE_ETH_FILTER_GET) { + rte_errno = EINVAL; + return -rte_errno; + } *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - ret = mlx5_fdir_ctrl_func(dev, filter_op, arg); - break; + return mlx5_fdir_ctrl_func(dev, filter_op, arg); default: ERROR("%p: filter type (%d) not supported", (void *)dev, filter_type); - break; + rte_errno = ENOTSUP; + return -rte_errno; } - return -ret; + return 0; } diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 91c977bc5..ba54c055e 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -41,15 +41,17 @@ * MAC address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]) { struct ifreq request; + int ret; - if (mlx5_ifreq(dev, SIOCGIFHWADDR, &request)) - return -1; + ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request); + if (ret) + return ret; memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); return 0; } @@ -67,8 +69,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) { assert(index < MLX5_MAX_MAC_ADDRESSES); memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr)); - if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); + if (!dev->data->promiscuous) { + int ret = mlx5_traffic_restart(dev); + + if (ret) + ERROR("%p cannot remove mac address: %s", (void *)dev, + strerror(rte_errno)); + } } /** @@ -84,14 +91,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) * VMDq pool index to associate address with (ignored). 
* * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, uint32_t index, uint32_t vmdq __rte_unused) { unsigned int i; - int ret = 0; assert(index < MLX5_MAX_MAC_ADDRESSES); /* First, make sure this address isn't already configured. */ @@ -102,12 +108,13 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac))) continue; /* Address already configured elsewhere, return with error. */ - return EADDRINUSE; + rte_errno = EADDRINUSE; + return -rte_errno; } dev->data->mac_addrs[index] = *mac; if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); - return ret; + return mlx5_traffic_restart(dev); + return 0; } /** @@ -121,6 +128,10 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, void mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { + int ret; + DEBUG("%p: setting primary MAC address", (void *)dev); - mlx5_mac_addr_add(dev, mac_addr, 0, 0); + ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0); + if (ret) + ERROR("cannot set mac address: %s", strerror(rte_errno)); } diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 5c4e68736..884ac33eb 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -91,7 +91,7 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, * Index of the next available entry. * * @return - * mr on success, NULL on failure. + * mr on success, NULL on failure and rte_errno is set. 
*/ struct mlx5_mr * mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, @@ -115,6 +115,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, " rte_eth_dev_start()", (void *)mp, mp->name); rte_spinlock_unlock(&txq_ctrl->priv->mr_lock); + rte_errno = ENOTSUP; return NULL; } mr = mlx5_mr_new(dev, mp); @@ -203,7 +204,9 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) mlx5_mr_release(mr); return; } - mlx5_mr_new(priv->dev, mp); + mr = mlx5_mr_new(priv->dev, mp); + if (!mr) + ERROR("cannot create memory region: %s", strerror(rte_errno)); } /** @@ -216,7 +219,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) * Pointer to the memory pool to register. * * @return - * The memory region on success. + * The memory region on success, NULL on failure and rte_errno is set. */ struct mlx5_mr * mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) @@ -231,11 +234,13 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id); if (!mr) { DEBUG("unable to configure MR, ibv_reg_mr() failed."); + rte_errno = ENOMEM; return NULL; } if (mlx5_check_mempool(mp, &start, &end) != 0) { ERROR("mempool %p: not virtually contiguous", (void *)mp); + rte_errno = ENOMEM; return NULL; } DEBUG("mempool %p area start=%p end=%p size=%zu", @@ -260,6 +265,10 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp) (size_t)(end - start)); mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start, IBV_ACCESS_LOCAL_WRITE); + if (!mr->mr) { + rte_errno = ENOMEM; + return NULL; + } mr->mp = mp; mr->lkey = rte_cpu_to_be_32(mr->mr->lkey); rte_atomic32_inc(&mr->refcnt); diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index a654a5a7d..5ac650163 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -35,33 +35,31 @@ * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. 
+ * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct priv *priv = dev->data->dev_private; - int ret = 0; if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { - ret = -EINVAL; - goto out; + rte_errno = EINVAL; + return -rte_errno; } if (rss_conf->rss_key && rss_conf->rss_key_len) { priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_conf->rss_key_len, 0); if (!priv->rss_conf.rss_key) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, rss_conf->rss_key, rss_conf->rss_key_len); priv->rss_conf.rss_key_len = rss_conf->rss_key_len; } priv->rss_conf.rss_hf = rss_conf->rss_hf; -out: - return ret; + return 0; } /** @@ -73,7 +71,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev, * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, @@ -81,8 +79,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - if (!rss_conf) - return -EINVAL; + if (!rss_conf) { + rte_errno = EINVAL; + return -rte_errno; + } if (rss_conf->rss_key && (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) { memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, @@ -102,7 +102,7 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, * The size of the array to allocate. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) @@ -116,8 +116,10 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) mem = rte_realloc(priv->reta_idx, reta_size * sizeof((*priv->reta_idx)[0]), 0); - if (!mem) - return ENOMEM; + if (!mem) { + rte_errno = ENOMEM; + return -rte_errno; + } priv->reta_idx = mem; priv->reta_idx_n = reta_size; if (old_size < reta_size) @@ -138,7 +140,7 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) * Size of the RETA table. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, @@ -149,8 +151,10 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, unsigned int idx; unsigned int i; - if (!reta_size || reta_size > priv->reta_idx_n) - return -EINVAL; + if (!reta_size || reta_size > priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -171,7 +175,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, * Size of the RETA table. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, @@ -184,8 +188,10 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, unsigned int i; unsigned int pos; - if (!reta_size) - return -EINVAL; + if (!reta_size) { + rte_errno = EINVAL; + return -rte_errno; + } ret = mlx5_rss_reta_index_resize(dev, reta_size); if (ret) return ret; @@ -199,7 +205,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, } if (dev->data->dev_started) { mlx5_dev_stop(dev); - mlx5_dev_start(dev); + return mlx5_dev_start(dev); } - return -ret; + return 0; } diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 4ffc869ad..0c1e9eb2a 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -32,8 +32,13 @@ void mlx5_promiscuous_enable(struct rte_eth_dev *dev) { + int ret; + dev->data->promiscuous = 1; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot enable promiscuous mode: %s", (void *)dev, + strerror(rte_errno)); } /** @@ -45,8 +50,13 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev) void mlx5_promiscuous_disable(struct rte_eth_dev *dev) { + int ret; + dev->data->promiscuous = 0; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot disable promiscuous mode: %s", (void *)dev, + strerror(rte_errno)); } /** @@ -58,8 +68,13 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev) void mlx5_allmulticast_enable(struct rte_eth_dev *dev) { + int ret; + dev->data->all_multicast = 1; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot enable allmulicast mode: %s", (void *)dev, + strerror(rte_errno)); } /** @@ -71,6 +86,11 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev) void mlx5_allmulticast_disable(struct rte_eth_dev *dev) { + int ret; + dev->data->all_multicast = 0; - mlx5_traffic_restart(dev); + ret = mlx5_traffic_restart(dev); + if (ret) + ERROR("%p cannot disable allmulicast mode: %s", (void *)dev, + 
strerror(rte_errno)); } diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 8e7693df2..477aa2631 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -61,7 +61,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); * Pointer to RX queue structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) @@ -69,7 +69,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; unsigned int i; - int ret = 0; + int err; /* Iterate on segments. */ for (i = 0; (i != elts_n); ++i) { @@ -78,7 +78,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); if (buf == NULL) { ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); - ret = ENOMEM; + rte_errno = ENOMEM; goto error; } /* Headroom is reserved by rte_pktmbuf_alloc(). */ @@ -120,9 +120,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) } DEBUG("%p: allocated and configured %u segments (max %u packets)", (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); - assert(ret == 0); return 0; error: + err = rte_errno; /* Save rte_errno before cleanup. */ elts_n = i; for (i = 0; (i != elts_n); ++i) { if ((*rxq_ctrl->rxq.elts)[i] != NULL) @@ -130,8 +130,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) (*rxq_ctrl->rxq.elts)[i] = NULL; } DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); - assert(ret > 0); - return ret; + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -271,7 +271,7 @@ mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) * Memory pool for buffer allocations. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -282,7 +282,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); - int ret = 0; if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); @@ -295,37 +294,37 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->rxqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->rxqs_n); - return -EOVERFLOW; + rte_errno = EOVERFLOW; + return -rte_errno; } if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) { - ret = ENOTSUP; ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.rxmode.offloads, (mlx5_get_rx_port_offloads() | mlx5_get_rx_queue_offloads(dev))); - goto out; + rte_errno = ENOTSUP; + return -rte_errno; } if (!mlx5_rxq_releasable(dev, idx)) { - ret = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, idx); - goto out; + rte_errno = EBUSY; + return -rte_errno; } mlx5_rxq_release(dev, idx); rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); if (!rxq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); - ret = ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } DEBUG("%p: adding RX queue %p to list", (void *)dev, (void *)rxq_ctrl); (*priv->rxqs)[idx] = &rxq_ctrl->rxq; -out: - return -ret; + return 0; } /** @@ -358,7 +357,7 @@ mlx5_rx_queue_release(void *dpdk_rxq) * Pointer to Ethernet device. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) @@ -377,7 +376,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) if (intr_handle->intr_vec == NULL) { ERROR("failed to allocate memory for interrupt vector," " Rx interrupts will not be supported"); - return -ENOMEM; + rte_errno = ENOMEM; + return -rte_errno; } intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { @@ -400,16 +400,18 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) " (%d), Rx interrupts cannot be enabled", RTE_MAX_RXTX_INTR_VEC_ID); mlx5_rx_intr_vec_disable(dev); - return -1; + rte_errno = ENOMEM; + return -rte_errno; } fd = rxq_ibv->channel->fd; flags = fcntl(fd, F_GETFL); rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); if (rc < 0) { + rte_errno = errno; ERROR("failed to make Rx interrupt file descriptor" " %d non-blocking for queue index %d", fd, i); mlx5_rx_intr_vec_disable(dev); - return -1; + return -rte_errno; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; intr_handle->efds[count] = fd; @@ -497,7 +499,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -505,12 +507,11 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; - int ret = 0; rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->irq) { @@ -518,16 +519,13 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); mlx5_rxq_ibv_release(rxq_ibv); } -exit: - if (ret) - WARN("unable to arm interrupt on rx queue %d", rx_queue_id); - return -ret; + return 0; } /** @@ -539,7 +537,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -550,35 +548,36 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct mlx5_rxq_ibv *rxq_ibv = NULL; struct ibv_cq *ev_cq; void *ev_ctx; - int ret = 0; + int ret; rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (!rxq_ctrl->irq) - goto exit; + return 0; rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx); if (ret || ev_cq != rxq_ibv->cq) { - ret = EINVAL; + rte_errno = EINVAL; goto exit; } rxq_data->cq_arm_sn++; mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); + return 0; exit: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (rxq_ibv) mlx5_rxq_ibv_release(rxq_ibv); - if (ret) - WARN("unable to disable interrupt on rx queue %d", - rx_queue_id); - return -ret; + WARN("unable to disable interrupt on rx queue %d", rx_queue_id); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -590,7 +589,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
*/ struct mlx5_rxq_ibv * mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) @@ -626,6 +625,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (!tmpl) { ERROR("%p: cannot allocate verbs resources", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } tmpl->rxq_ctrl = rxq_ctrl; @@ -643,6 +643,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (!tmpl->channel) { ERROR("%p: Comp Channel creation failure", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } } @@ -672,6 +673,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) &attr.cq.mlx5)); if (tmpl->cq == NULL) { ERROR("%p: CQ creation failure", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } DEBUG("priv->device_attr.max_qp_wr is %d", @@ -708,6 +710,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq); if (tmpl->wq == NULL) { ERROR("%p: WQ creation failure", (void *)rxq_ctrl); + rte_errno = ENOMEM; goto error; } /* @@ -722,6 +725,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) ((1 << rxq_data->elts_n) >> rxq_data->sges_n), (1 << rxq_data->sges_n), attr.wq.max_wr, attr.wq.max_sge); + rte_errno = EINVAL; goto error; } /* Change queue state to ready. */ @@ -733,6 +737,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (ret) { ERROR("%p: WQ state to IBV_WQS_RDY failed", (void *)rxq_ctrl); + rte_errno = ret; goto error; } obj.cq.in = tmpl->cq; @@ -740,11 +745,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) obj.rwq.in = tmpl->wq; obj.rwq.out = &rwq; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ); - if (ret != 0) + if (ret) { + rte_errno = ret; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { ERROR("Wrong MLX5_CQE_SIZE environment variable value: " "it should be set to %u", RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } /* Fill the rings. 
*/ @@ -788,6 +796,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl->wq) claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); if (tmpl->cq) @@ -797,6 +806,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (tmpl->mr) mlx5_mr_release(tmpl->mr); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } @@ -920,7 +930,7 @@ mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv) * NUMA socket on which memory must be allocated. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set. */ struct mlx5_rxq_ctrl * mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -942,8 +952,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } tmpl->socket = socket; if (priv->dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; @@ -973,6 +985,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, 1 << sges_n, dev->data->dev_conf.rxmode.max_rx_pkt_len); + rte_errno = EOVERFLOW; goto error; } } else { @@ -991,6 +1004,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void *)dev, desc, 1 << tmpl->rxq.sges_n); + rte_errno = EINVAL; goto error; } /* Toggle RX checksum offload if hardware supports it. */ @@ -1045,7 +1059,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, * TX queue index. * * @return - * A pointer to the queue if it exists. + * A pointer to the queue if it exists, NULL otherwise. */ struct mlx5_rxq_ctrl * mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) @@ -1108,7 +1122,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) * TX queue index. 
* * @return - * 1 if the queue can be released. + * 1 if the queue can be released, negative errno otherwise and rte_errno is + * set. */ int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) @@ -1116,8 +1131,10 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; - if (!(*priv->rxqs)[idx]) - return -1; + if (!(*priv->rxqs)[idx]) { + rte_errno = EINVAL; + return -rte_errno; + } rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1); } @@ -1157,7 +1174,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev) * Number of queues in the array. * * @return - * A new indirection table. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ struct mlx5_ind_table_ibv * mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], @@ -1174,8 +1191,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) + queues_n * sizeof(uint16_t), 0); - if (!ind_tbl) + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } for (i = 0; i != queues_n; ++i) { struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]); @@ -1195,8 +1214,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], .ind_tbl = wq, .comp_mask = 0, }); - if (!ind_tbl->ind_table) + if (!ind_tbl->ind_table) { + rte_errno = errno; goto error; + } rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev, @@ -1321,7 +1342,7 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) * Number of queues. * * @return - * An hash Rx queue on success. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
*/ struct mlx5_hrxq * mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, @@ -1331,13 +1352,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; + int err; queues_n = hash_fields ? queues_n : 1; ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n); - if (!ind_tbl) + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } qp = mlx5_glue->create_qp_ex (priv->ctx, &(struct ibv_qp_init_attr_ex){ @@ -1355,8 +1379,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, .rwq_ind_tbl = ind_tbl->ind_table, .pd = priv->pd, }); - if (!qp) + if (!qp) { + rte_errno = errno; goto error; + } hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0); if (!hrxq) goto error; @@ -1371,9 +1397,11 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len, (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: + err = rte_errno; /* Save rte_errno before cleanup. */ mlx5_ind_table_ibv_release(dev, ind_tbl); if (qp) claim_zero(mlx5_glue->destroy_qp(qp)); + rte_errno = err; /* Restore rte_errno. */ return NULL; } diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 8db25cff1..6e2d971c7 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -22,7 +22,7 @@ * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_socket_init(struct rte_eth_dev *dev) @@ -41,16 +41,21 @@ mlx5_socket_init(struct rte_eth_dev *dev) */ ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { + rte_errno = errno; WARN("secondary process not supported: %s", strerror(errno)); - return ret; + goto error; } priv->primary_socket = ret; flags = fcntl(priv->primary_socket, F_GETFL, 0); - if (flags == -1) - goto out; + if (flags == -1) { + rte_errno = errno; + goto error; + } ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK); - if (ret < 0) - goto out; + if (ret < 0) { + rte_errno = errno; + goto error; + } snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = stat(sun.sun_path, &file_stat); @@ -59,29 +64,30 @@ mlx5_socket_init(struct rte_eth_dev *dev) ret = bind(priv->primary_socket, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { + rte_errno = errno; WARN("cannot bind socket, secondary process not supported: %s", strerror(errno)); goto close; } ret = listen(priv->primary_socket, 0); if (ret < 0) { + rte_errno = errno; WARN("Secondary process not supported: %s", strerror(errno)); goto close; } - return ret; + return 0; close: remove(sun.sun_path); -out: +error: claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; - return -(ret); + return -rte_errno; } /** * Un-Initialise the socket to communicate with the secondary process * * @param[in] dev - * Pointer to Ethernet device. 
*/ void mlx5_socket_uninit(struct rte_eth_dev *dev) @@ -131,19 +137,21 @@ mlx5_socket_handle(struct rte_eth_dev *dev) ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1}, sizeof(int)); if (ret < 0) { - WARN("cannot change socket options"); - goto out; + ret = errno; + WARN("cannot change socket options: %s", strerror(rte_errno)); + goto error; } ret = recvmsg(conn_sock, &msg, MSG_WAITALL); if (ret < 0) { - WARN("received an empty message: %s", strerror(errno)); - goto out; + ret = errno; + WARN("received an empty message: %s", strerror(rte_errno)); + goto error; } /* Expect to receive credentials only. */ cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { WARN("no message"); - goto out; + goto error; } if ((cmsg->cmsg_type == SCM_CREDENTIALS) && (cmsg->cmsg_len >= sizeof(*cred))) { @@ -153,13 +161,13 @@ mlx5_socket_handle(struct rte_eth_dev *dev) cmsg = CMSG_NXTHDR(&msg, cmsg); if (cmsg != NULL) { WARN("Message wrongly formatted"); - goto out; + goto error; } /* Make sure all the ancillary data was received and valid. */ if ((cred == NULL) || (cred->uid != getuid()) || (cred->gid != getgid())) { WARN("wrong credentials"); - goto out; + goto error; } /* Set-up the ancillary data. */ cmsg = CMSG_FIRSTHDR(&msg); @@ -172,7 +180,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev) ret = sendmsg(conn_sock, &msg, 0); if (ret < 0) WARN("cannot send response"); -out: +error: close(conn_sock); } @@ -183,7 +191,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev) * Pointer to Ethernet structure. * * @return - * fd on success, negative errno value on failure. + * fd on success, negative errno value otherwise and rte_errno is set. 
*/ int mlx5_socket_connect(struct rte_eth_dev *dev) @@ -192,7 +200,7 @@ mlx5_socket_connect(struct rte_eth_dev *dev) struct sockaddr_un sun = { .sun_family = AF_UNIX, }; - int socket_fd; + int socket_fd = -1; int *fd = NULL; int ret; struct ucred *cred; @@ -212,57 +220,67 @@ mlx5_socket_connect(struct rte_eth_dev *dev) ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { + rte_errno = errno; WARN("cannot connect to primary"); - return ret; + goto error; } socket_fd = ret; snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { + rte_errno = errno; WARN("cannot connect to primary"); - goto out; + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { + rte_errno = EINVAL; DEBUG("cannot get first message"); - goto out; + goto error; } cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDENTIALS; cmsg->cmsg_len = CMSG_LEN(sizeof(*cred)); cred = (struct ucred *)CMSG_DATA(cmsg); if (cred == NULL) { + rte_errno = EINVAL; DEBUG("no credentials received"); - goto out; + goto error; } cred->pid = getpid(); cred->uid = getuid(); cred->gid = getgid(); ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT); if (ret < 0) { + rte_errno = errno; WARN("cannot send credentials to primary: %s", strerror(errno)); - goto out; + goto error; } ret = recvmsg(socket_fd, &msg, MSG_WAITALL); if (ret <= 0) { + rte_errno = errno; WARN("no message from primary: %s", strerror(errno)); - goto out; + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { + rte_errno = EINVAL; WARN("No file descriptor received"); - goto out; + goto error; } fd = (int *)CMSG_DATA(cmsg); - if (*fd <= 0) { + if (*fd < 0) { WARN("no file descriptor received: %s", strerror(errno)); - ret = *fd; - goto out; + rte_errno = *fd; + goto error; } ret = *fd; -out: close(socket_fd); - return ret; + return 0; +error: + if (socket_fd != -1) + close(socket_fd); + return -rte_errno; } diff 
--git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 0febed878..06e9a1f19 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -128,7 +128,8 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); * Counters table output buffer. * * @return - * 0 on success and stats is filled, negative on error. + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ static int mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) @@ -140,13 +141,15 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; et_stats->cmd = ETHTOOL_GSTATS; et_stats->n_stats = xstats_ctrl->stats_n; ifr.ifr_data = (caddr_t)et_stats; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("unable to read statistic values from device"); - return -1; + return ret; } for (i = 0; i != xstats_n; ++i) { if (mlx5_counters_init[i].ib) { @@ -178,18 +181,21 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) * Pointer to Ethernet device. * * @return - * Number of statistics on success, -1 on error. + * Number of statistics on success, negative errno value otherwise and + * rte_errno is set. 
*/ static int mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { struct ethtool_drvinfo drvinfo; struct ifreq ifr; + int ret; drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("unable to query number of statistics"); - return -1; + return ret; } return drvinfo.n_stats; } @@ -211,12 +217,14 @@ mlx5_xstats_init(struct rte_eth_dev *dev) struct ethtool_gstrings *strings = NULL; unsigned int dev_stats_n; unsigned int str_sz; + int ret; - dev_stats_n = mlx5_ethtool_get_stats_n(dev); - if (dev_stats_n < 1) { + ret = mlx5_ethtool_get_stats_n(dev); + if (ret < 0) { WARN("no extended statistics available"); return; } + dev_stats_n = ret; xstats_ctrl->stats_n = dev_stats_n; /* Allocate memory to grab stat names and values. */ str_sz = dev_stats_n * ETH_GSTRING_LEN; @@ -231,7 +239,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev) strings->string_set = ETH_SS_STATS; strings->len = dev_stats_n; ifr.ifr_data = (caddr_t)strings; - if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { WARN("unable to get statistic names"); goto free; } @@ -260,7 +269,9 @@ mlx5_xstats_init(struct rte_eth_dev *dev) } /* Copy to base at first time. */ assert(xstats_n <= MLX5_MAX_XSTATS); - mlx5_read_dev_counters(dev, xstats_ctrl->base); + ret = mlx5_read_dev_counters(dev, xstats_ctrl->base); + if (ret) + ERROR("cannot read device counters: %s", strerror(rte_errno)); free: rte_free(strings); } @@ -277,7 +288,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev) * * @return * Number of extended stats on success and stats is filled, - * negative on error. + * negative on error and rte_errno is set. 
*/ int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, @@ -286,15 +297,15 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, struct priv *priv = dev->data->dev_private; unsigned int i; uint64_t counters[n]; - int ret = 0; if (n >= xstats_n && stats) { struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; + int ret; stats_n = mlx5_ethtool_get_stats_n(dev); if (stats_n < 0) - return -1; + return stats_n; if (xstats_ctrl->stats_n != stats_n) mlx5_xstats_init(dev); ret = mlx5_read_dev_counters(dev, counters); @@ -315,6 +326,10 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, * Pointer to Ethernet device structure. * @param[out] stats * Stats structure output buffer. + * + * @return + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) @@ -419,14 +434,22 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) unsigned int i; unsigned int n = xstats_n; uint64_t counters[n]; + int ret; stats_n = mlx5_ethtool_get_stats_n(dev); - if (stats_n < 0) + if (stats_n < 0) { + ERROR("%p cannot get stats: %s", (void *)dev, + strerror(-stats_n)); return; + } if (xstats_ctrl->stats_n != stats_n) mlx5_xstats_init(dev); - if (mlx5_read_dev_counters(dev, counters) < 0) + ret = mlx5_read_dev_counters(dev, counters); + if (ret) { + ERROR("%p cannot read device counters: %s", (void *)dev, + strerror(rte_errno)); return; + } for (i = 0; i != n; ++i) xstats_ctrl->base[i] = counters[i]; } diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index a3ccebdd6..4e396b7f0 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -37,14 +37,14 @@ mlx5_txq_stop(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_txq_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; + int ret; /* Add memory regions to Tx queues. */ for (i = 0; i != priv->txqs_n; ++i) { @@ -62,17 +62,19 @@ mlx5_txq_start(struct rte_eth_dev *dev) txq_alloc_elts(txq_ctrl); txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i); if (!txq_ctrl->ibv) { - ret = ENOMEM; + rte_errno = ENOMEM; goto error; } } ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd); if (ret) goto error; - return ret; + return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ mlx5_txq_stop(dev); - return ret; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -98,7 +100,7 @@ mlx5_rxq_stop(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, errno on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_rxq_start(struct rte_eth_dev *dev) @@ -116,15 +118,15 @@ mlx5_rxq_start(struct rte_eth_dev *dev) if (ret) goto error; rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i); - if (!rxq_ctrl->ibv) { - ret = ENOMEM; + if (!rxq_ctrl->ibv) goto error; - } } - return -ret; + return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ mlx5_rxq_stop(dev); - return -ret; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -136,48 +138,48 @@ mlx5_rxq_start(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_dev_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr = NULL; - int err; + int ret; dev->data->dev_started = 1; - err = mlx5_flow_create_drop_queue(dev); - if (err) { + ret = mlx5_flow_create_drop_queue(dev); + if (ret) { ERROR("%p: Drop queue allocation failed: %s", - (void *)dev, strerror(err)); + (void *)dev, strerror(rte_errno)); goto error; } DEBUG("%p: allocating and configuring hash RX queues", (void *)dev); rte_mempool_walk(mlx5_mp2mr_iter, priv); - err = mlx5_txq_start(dev); - if (err) { - ERROR("%p: TXQ allocation failed: %s", - (void *)dev, strerror(err)); + ret = mlx5_txq_start(dev); + if (ret) { + ERROR("%p: Tx Queue allocation failed: %s", + (void *)dev, strerror(rte_errno)); goto error; } - err = mlx5_rxq_start(dev); - if (err) { - ERROR("%p: RXQ allocation failed: %s", - (void *)dev, strerror(err)); + ret = mlx5_rxq_start(dev); + if (ret) { + ERROR("%p: Rx Queue allocation failed: %s", + (void *)dev, strerror(rte_errno)); goto error; } - err = mlx5_rx_intr_vec_enable(dev); - if (err) { - ERROR("%p: RX interrupt vector creation failed", - (void *)priv); + ret = mlx5_rx_intr_vec_enable(dev); + if (ret) { + ERROR("%p: Rx interrupt vector creation failed", + (void *)dev); goto error; } mlx5_xstats_init(dev); /* Update link status and Tx/Rx callbacks for the first time. */ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); INFO("Forcing port %u link to be up", dev->data->port_id); - err = mlx5_force_link_status_change(dev, ETH_LINK_UP); - if (err) { + ret = mlx5_force_link_status_change(dev, ETH_LINK_UP); + if (ret) { DEBUG("Failed to set port %u link to be up", dev->data->port_id); goto error; @@ -185,6 +187,7 @@ mlx5_dev_start(struct rte_eth_dev *dev) mlx5_dev_interrupt_handler_install(dev); return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ /* Rollback. 
*/ dev->data->dev_started = 0; for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) @@ -194,7 +197,8 @@ mlx5_dev_start(struct rte_eth_dev *dev) mlx5_txq_stop(dev); mlx5_rxq_stop(dev); mlx5_flow_delete_drop_queue(dev); - return err; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -238,7 +242,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev) * Pointer to Ethernet device structure. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_traffic_enable(struct rte_eth_dev *dev) @@ -276,8 +280,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc)); - return 0; + ret = mlx5_ctrl_flow(dev, &promisc, &promisc); + if (ret) + goto error; } if (dev->data->all_multicast) { struct rte_flow_item_eth multicast = { @@ -286,7 +291,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast)); + ret = mlx5_ctrl_flow(dev, &multicast, &multicast); + if (ret) + goto error; } else { /* Add broadcast/multicast flows. */ for (i = 0; i != vlan_filter_n; ++i) { @@ -346,15 +353,17 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) goto error; } if (!vlan_filter_n) { - ret = mlx5_ctrl_flow(dev, &unicast, - &unicast_mask); + ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask); if (ret) goto error; } } return 0; error: - return rte_errno; + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_list_flush(dev, &priv->ctrl_flows); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } @@ -379,14 +388,14 @@ mlx5_traffic_disable(struct rte_eth_dev *dev) * Pointer to Ethernet device private data. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_traffic_restart(struct rte_eth_dev *dev) { if (dev->data->dev_started) { mlx5_traffic_disable(dev); - mlx5_traffic_enable(dev); + return mlx5_traffic_enable(dev); } return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index a5e2c7fa7..042704cc6 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -161,7 +161,7 @@ mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) * Thresholds parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -171,7 +171,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_txq_data *txq = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); - int ret = 0; /* * Don't verify port offloads for application which @@ -179,13 +178,13 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, */ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) { - ret = ENOTSUP; + rte_errno = ENOTSUP; ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, (void *)dev, conf->offloads, dev->data->dev_conf.txmode.offloads, mlx5_get_tx_port_offloads(dev)); - goto out; + return -rte_errno; } if (desc <= MLX5_TX_COMP_THRESH) { WARN("%p: number of descriptors requested for TX queue %u" @@ -205,27 +204,26 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (idx >= priv->txqs_n) { ERROR("%p: queue index out of range (%u >= %u)", (void *)dev, idx, priv->txqs_n); - return -EOVERFLOW; + rte_errno = EOVERFLOW; + return -rte_errno; } if (!mlx5_txq_releasable(dev, idx)) { - ret = EBUSY; + rte_errno = EBUSY; ERROR("%p: unable to release queue index %u", (void *)dev, 
idx); - goto out; + return -rte_errno; } mlx5_txq_release(dev, idx); txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); if (!txq_ctrl) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); - ret = ENOMEM; - goto out; + return -rte_errno; } DEBUG("%p: adding TX queue %p to list", (void *)dev, (void *)txq_ctrl); (*priv->txqs)[idx] = &txq_ctrl->txq; -out: - return -ret; + return 0; } /** @@ -248,9 +246,9 @@ mlx5_tx_queue_release(void *dpdk_txq) priv = txq_ctrl->priv; for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { + mlx5_txq_release(priv->dev, i); DEBUG("%p: removing TX queue %p from list", (void *)priv->dev, (void *)txq_ctrl); - mlx5_txq_release(priv->dev, i); break; } } @@ -267,7 +265,7 @@ mlx5_tx_queue_release(void *dpdk_txq) * Verbs file descriptor to map UAR pages. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) @@ -284,7 +282,6 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) struct mlx5_txq_ctrl *txq_ctrl; int already_mapped; size_t page_size = sysconf(_SC_PAGESIZE); - int r; memset(pages, 0, priv->txqs_n * sizeof(uintptr_t)); /* @@ -323,8 +320,8 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) /* fixed mmap have to return same address */ ERROR("call to mmap failed on UAR for txq %d\n", i); - r = ENXIO; - return r; + rte_errno = ENXIO; + return -rte_errno; } } if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */ @@ -364,7 +361,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
*/ struct mlx5_txq_ibv * mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) @@ -394,7 +391,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) priv->verbs_alloc_ctx.obj = txq_ctrl; if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set"); - goto error; + rte_errno = EINVAL; + return NULL; } memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv)); /* MRs will be registered in mp2mr[] later. */ @@ -408,6 +406,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0); if (tmpl.cq == NULL) { ERROR("%p: CQ creation failure", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.init = (struct ibv_qp_init_attr_ex){ @@ -449,6 +448,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init); if (tmpl.qp == NULL) { ERROR("%p: QP creation failure", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ @@ -461,6 +461,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) (IBV_QP_STATE | IBV_QP_PORT)); if (ret) { ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ @@ -469,18 +470,21 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl); + rte_errno = errno; goto error; } attr.mod.qp_state = IBV_QPS_RTS; ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl); + rte_errno = errno; goto error; } txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0, txq_ctrl->socket); if (!txq_ibv) { ERROR("%p: cannot allocate memory", (void *)txq_ctrl); + rte_errno = ENOMEM; goto error; } obj.cq.in = tmpl.cq; @@ -488,11 +492,14 @@ mlx5_txq_ibv_new(struct 
rte_eth_dev *dev, uint16_t idx) obj.qp.in = tmpl.qp; obj.qp.out = &qp; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP); - if (ret != 0) + if (ret != 0) { + rte_errno = errno; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { ERROR("Wrong MLX5_CQE_SIZE environment variable value: " "it should be set to %u", RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } txq_data->cqe_n = log2above(cq_info.cqe_cnt); @@ -518,6 +525,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset; } else { ERROR("Failed to retrieve UAR info, invalid libmlx5.so version"); + rte_errno = EINVAL; goto error; } DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev, @@ -526,11 +534,13 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return txq_ibv; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl.cq) claim_zero(mlx5_glue->destroy_cq(tmpl.cq)); if (tmpl.qp) claim_zero(mlx5_glue->destroy_qp(tmpl.qp)); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } @@ -743,7 +753,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * Thresholds parameters. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set. 
*/ struct mlx5_txq_ctrl * mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -756,8 +766,10 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, sizeof(*tmpl) + desc * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } assert(desc > MLX5_TX_COMP_THRESH); tmpl->txq.offloads = conf->offloads; tmpl->priv = priv; diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 3df962a90..3246c0a38 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -37,14 +37,13 @@ * Toggle filter. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; DEBUG("%p: %s VLAN filter ID %" PRIu16, (void *)dev, (on ? "enable" : "disable"), vlan_id); @@ -54,8 +53,8 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) break; /* Check if there's room for another VLAN filter. */ if (i == RTE_DIM(priv->vlan_filter)) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } if (i < priv->vlan_filter_n) { assert(priv->vlan_filter_n != 0); @@ -78,10 +77,10 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->vlan_filter[priv->vlan_filter_n] = vlan_id; ++priv->vlan_filter_n; } - if (dev->data->dev_started) - mlx5_traffic_restart(dev); out: - return ret; + if (dev->data->dev_started) + return mlx5_traffic_restart(dev); + return 0; } /** @@ -105,7 +104,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) uint16_t vlan_offloads = (on ? 
IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) | 0; - int err; + int ret; /* Validate hw support */ if (!priv->config.hw_vlan_strip) { @@ -129,10 +128,10 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING, .flags = vlan_offloads, }; - err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); - if (err) { + ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod); + if (ret) { ERROR("%p: failed to modified stripping mode: %s", - (void *)dev, strerror(err)); + (void *)dev, strerror(rte_errno)); return; } /* Update related bits in RX queue. */ @@ -146,6 +145,9 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) * Pointer to Ethernet device structure. * @param mask * VLAN offload bit mask. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) -- 2.11.0 ^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH v3 00/10] net/mlx5: clean driver 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro ` (9 preceding siblings ...) 2018-03-05 12:21 ` [PATCH v3 10/10] net/mlx5: standardize on negative errno values Nelio Laranjeiro @ 2018-03-18 6:33 ` Shahaf Shuler 2018-03-21 17:34 ` Ferruh Yigit 10 siblings, 1 reply; 30+ messages in thread From: Shahaf Shuler @ 2018-03-18 6:33 UTC (permalink / raw) To: Nélio Laranjeiro, dev@dpdk.org; +Cc: Adrien Mazarguil, Yongseok Koh Monday, March 5, 2018 2:21 PM, Nelio Laranjeiro: > - Removes unused SR-IOV flag. > - Adds missing documentation on some functions. > - Removes the spin-lock on the private structure. > - Standardize the return values of all functions as discussed on the mailing > list [1]. > > [1] > https://emea01.safelinks.protection.outlook.com/?url=https%3A%2F%2Fdp > dk.org%2Fml%2Farchives%2Fdev%2F2018- > January%2F087991.html&data=02%7C01%7Cshahafs%40mellanox.com%7Cf0 > 6e83bf350d44ba5f8908d58293a839%7Ca652971c7d2e4d9ba6a4d149256f461b > %7C0%7C0%7C636558493060723163&sdata=mlYhk19ixx42hzEmUguXNq8rUe > H7%2F5EIAF3AJD%2F6%2FNA%3D&reserved=0 > Series applied to next-net-mlx, thanks. ^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH v3 00/10] net/mlx5: clean driver 2018-03-18 6:33 ` [PATCH v3 00/10] net/mlx5: clean driver Shahaf Shuler @ 2018-03-21 17:34 ` Ferruh Yigit 0 siblings, 0 replies; 30+ messages in thread From: Ferruh Yigit @ 2018-03-21 17:34 UTC (permalink / raw) To: Shahaf Shuler, Nélio Laranjeiro, dev@dpdk.org Cc: Adrien Mazarguil, Yongseok Koh On 3/18/2018 6:33 AM, Shahaf Shuler wrote: > Monday, March 5, 2018 2:21 PM, Nelio Laranjeiro: >> - Removes unused SR-IOV flag. >> - Adds missing documentation on some functions. >> - Removes the spin-lock on the private structure. >> - Standardize the return values of all functions as discussed on the mailing >> list [1]. >> >> [1] >> https://emea01.safelinks.protection.outlook.com/?url=https%3A%2F%2Fdp >> dk.org%2Fml%2Farchives%2Fdev%2F2018- >> January%2F087991.html&data=02%7C01%7Cshahafs%40mellanox.com%7Cf0 >> 6e83bf350d44ba5f8908d58293a839%7Ca652971c7d2e4d9ba6a4d149256f461b >> %7C0%7C0%7C636558493060723163&sdata=mlYhk19ixx42hzEmUguXNq8rUe >> H7%2F5EIAF3AJD%2F6%2FNA%3D&reserved=0 >> > > Series applied to next-net-mlx, thanks. This is a good cleanup thanks for the effort. ^ permalink raw reply [flat|nested] 30+ messages in thread
end of thread, other threads:[~2018-03-21 17:34 UTC | newest] Thread overview: 30+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2018-02-15 9:29 [PATCH 1/3] net/mlx5: add missing function documentation Nelio Laranjeiro 2018-02-15 9:29 ` [PATCH 2/3] net/mlx5: convert return errno to negative ones Nelio Laranjeiro 2018-02-16 14:26 ` Adrien Mazarguil 2018-02-15 9:29 ` [PATCH 3/3] net/mlx5: fix traffic restart function to return errors Nelio Laranjeiro 2018-02-16 14:26 ` Adrien Mazarguil 2018-02-16 14:26 ` [PATCH 1/3] net/mlx5: add missing function documentation Adrien Mazarguil 2018-02-28 15:12 ` [PATCH v2 00/10] net/mlx5: clean driver Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 02/10] net/mlx5: name parameters in function prototypes Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 03/10] net/mlx5: mark parameters with unused attribute Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 04/10] net/mlx5: normalize function prototypes Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 05/10] net/mlx5: add missing function documentation Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 06/10] net/mlx5: remove useless empty lines Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 07/10] net/mlx5: remove control path locks Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 08/10] net/mlx5: prefix all function with mlx5 Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 09/10] net/mlx5: change non failing function return values Nelio Laranjeiro 2018-02-28 15:12 ` [PATCH v2 10/10] net/mlx5: standardize on negative errno values Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 00/10] net/mlx5: clean driver Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 01/10] net/mlx5: fix sriov flag Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 02/10] net/mlx5: name parameters in function prototypes Nelio Laranjeiro 2018-03-05 12:20 ` [PATCH v3 03/10] net/mlx5: mark parameters with unused attribute 
Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 04/10] net/mlx5: normalize function prototypes Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 05/10] net/mlx5: add missing function documentation Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 06/10] net/mlx5: remove useless empty lines Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 07/10] net/mlx5: remove control path locks Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 08/10] net/mlx5: prefix all function with mlx5 Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 09/10] net/mlx5: change non failing function return values Nelio Laranjeiro 2018-03-05 12:21 ` [PATCH v3 10/10] net/mlx5: standardize on negative errno values Nelio Laranjeiro 2018-03-18 6:33 ` [PATCH v3 00/10] net/mlx5: clean driver Shahaf Shuler 2018-03-21 17:34 ` Ferruh Yigit
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).