* [PATCH net-next 1/6] net/mlx4_en: Add RFS support in UDP
From: Amir Vadai @ 2013-11-07 10:19 UTC
To: David S. Miller
Cc: netdev, Amir Vadai, Yevgeny Petrilin, Or Gerlitz, Eyal Perry
From: Eyal Perry <eyalpe@mellanox.com>
Modify RFS code to support applying filters for incoming UDP streams.
Signed-off-by: Eyal Perry <eyalpe@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
---
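[Note for readers skimming the diff: below is a condensed sketch (not part
of the patch) of the parse path mlx4_en_filter_rfs() ends up with, IPv4
only, mirroring the hunks that follow; the helper name is invented for
illustration.

static int mlx4_en_rfs_parse(const struct sk_buff *skb, u8 *ip_proto,
			     __be16 *src_port, __be16 *dst_port)
{
	int nhoff = skb_network_offset(skb);
	const struct iphdr *ip = (const struct iphdr *)(skb->data + nhoff);
	const __be16 *ports;

	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	/* New in this patch: UDP is accepted alongside TCP */
	if (ip->protocol != IPPROTO_TCP && ip->protocol != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
	*ip_proto = ip->protocol;
	*src_port = ports[0];
	*dst_port = ports[1];
	return 0;
}

The stored ip_proto is later mapped to MLX4_NET_TRANS_RULE_ID_UDP or
MLX4_NET_TRANS_RULE_ID_TCP when the steering rule is attached, and it is
part of the filter lookup key, so TCP and UDP flows with identical
4-tuples get distinct filters.]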
drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 45 +++++++++++++++++++-------
1 file changed, 34 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b555412..cd61e26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -102,6 +102,7 @@ struct mlx4_en_filter {
struct list_head next;
struct work_struct work;
+ u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -120,14 +121,26 @@ struct mlx4_en_filter {
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+ switch (ip_proto) {
+ case IPPROTO_UDP:
+ return MLX4_NET_TRANS_RULE_ID_UDP;
+ case IPPROTO_TCP:
+ return MLX4_NET_TRANS_RULE_ID_TCP;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+};
+
static void mlx4_en_filter_work(struct work_struct *work)
{
struct mlx4_en_filter *filter = container_of(work,
struct mlx4_en_filter,
work);
struct mlx4_en_priv *priv = filter->priv;
- struct mlx4_spec_list spec_tcp = {
- .id = MLX4_NET_TRANS_RULE_ID_TCP,
+ struct mlx4_spec_list spec_tcp_udp = {
+ .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
{
.tcp_udp = {
.dst_port = filter->dst_port,
@@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+ if (spec_tcp_udp.id < 0) {
+ en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+ filter->ip_proto);
+ goto ignore;
+ }
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
- list_add_tail(&spec_tcp.list, &rule.list);
+ list_add_tail(&spec_tcp_udp.list, &rule.list);
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
if (rc)
en_err(priv, "Error attaching flow. err = %d\n", rc);
+ignore:
mlx4_en_filter_rfs_expire(priv);
filter->activated = 1;
@@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
- __be32 dst_ip, __be16 src_port, __be16 dst_port,
- u32 flow_id)
+ __be32 dst_ip, u8 ip_proto, __be16 src_port,
+ __be16 dst_port, u32 flow_id)
{
struct mlx4_en_filter *filter = NULL;
@@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
filter->src_ip = src_ip;
filter->dst_ip = dst_ip;
+ filter->ip_proto = ip_proto;
filter->src_port = src_port;
filter->dst_port = dst_port;
@@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
- __be16 src_port, __be16 dst_port)
+ u8 ip_proto, __be16 src_port, __be16 dst_port)
{
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
filter_chain) {
if (filter->src_ip == src_ip &&
filter->dst_ip == dst_ip &&
+ filter->ip_proto == ip_proto &&
filter->src_port == src_port &&
filter->dst_port == dst_port) {
ret = filter;
@@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct mlx4_en_filter *filter;
const struct iphdr *ip;
const __be16 *ports;
+ u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
+ if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ ip_proto = ip->protocol;
src_ip = ip->saddr;
dst_ip = ip->daddr;
src_port = ports[0];
dst_port = ports[1];
- if (ip->protocol != IPPROTO_TCP)
- return -EPROTONOSUPPORT;
-
spin_lock_bh(&priv->filters_lock);
- filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+ filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+ src_port, dst_port);
if (filter) {
if (filter->rxq_index == rxq_index)
goto out;
@@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
filter->rxq_index = rxq_index;
} else {
filter = mlx4_en_filter_alloc(priv, rxq_index,
- src_ip, dst_ip,
+ src_ip, dst_ip, ip_proto,
src_port, dst_port, flow_id);
if (!filter) {
ret = -ENOMEM;
--
1.8.3.4
* [PATCH net-next 2/6] net/mlx4_core: Initialize all mailbox buffers to zero before use
From: Amir Vadai @ 2013-11-07 10:19 UTC
To: David S. Miller
Cc: netdev, Amir Vadai, Yevgeny Petrilin, Or Gerlitz,
Jack Morgenstein, Majd Dibbiny
From: Jack Morgenstein <jackm@dev.mellanox.co.il>
To guarantee that all unused fields in all FW commands for both inboxes
and outboxes are zeroed out, initialize the mailbox buffer to all zeroes.
This is especially important for SRIOV comm-channel virtual commands
(such as QUERY_FUNC_CAP), where if new fields are added to support new
features, the driver can depend on older kernels passing zeroes in these
fields.
In addition to zeroing out the mailbox buffer at allocation time, all
(now unnecessary) calls to memset by the callers of
mlx4_alloc_cmd_mailbox() are removed.
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
---
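[Note: a sketch of the allocator after this change (not part of the diff;
the buffer-pool call is assumed from the surrounding cmd.c code of this
era) -- every mailbox handed to a caller is now zero-filled:

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	/* assumed: DMA-coherent buffer from the command pool */
	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	/* Added by this patch: unused command fields read back as zero */
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	return mailbox;
}

This is what makes the per-caller memset()s removed below redundant.]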
drivers/infiniband/hw/mlx4/main.c | 6 ------
drivers/net/ethernet/mellanox/mlx4/cmd.c | 2 ++
drivers/net/ethernet/mellanox/mlx4/cq.c | 6 ------
drivers/net/ethernet/mellanox/mlx4/en_port.c | 3 ---
drivers/net/ethernet/mellanox/mlx4/eq.c | 1 -
drivers/net/ethernet/mellanox/mlx4/fw.c | 7 -------
drivers/net/ethernet/mellanox/mlx4/mcg.c | 2 --
drivers/net/ethernet/mellanox/mlx4/mr.c | 5 -----
drivers/net/ethernet/mellanox/mlx4/port.c | 11 -----------
drivers/net/ethernet/mellanox/mlx4/srq.c | 2 --
10 files changed, 2 insertions(+), 43 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 7567437..6a0a0d2 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -526,7 +526,6 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (IS_ERR(mailbox))
return 0;
- memset(mailbox->buf, 0, 256);
memcpy(mailbox->buf, props->node_desc, 64);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -547,8 +546,6 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, 256);
-
if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
@@ -879,8 +876,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
struct mlx4_ib_dev *mdev = to_mdev(qp->device);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
- size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
- (sizeof(struct _rule_hw) * flow_attr->num_of_specs);
static const u16 __mlx4_domain[] = {
[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -905,7 +900,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, rule_size);
ctrl = mailbox->buf;
ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 65d41b76..7207dcd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2199,6 +2199,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
return ERR_PTR(-ENOMEM);
}
+ memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 004e423..22fcbe7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -128,8 +128,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->cq_max_count = cpu_to_be16(count);
cq_context->cq_period = cpu_to_be16(period);
@@ -153,8 +151,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
cq_context->log_page_size = mtt->page_shift - 12;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -274,8 +270,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
}
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->flags = cpu_to_be32(!!collapsed << 18);
if (timestamp_en)
cq_context->flags |= cpu_to_be32(1 << 19);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3317914..5f8535e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -56,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
return PTR_ERR(mailbox);
filter = mailbox->buf;
- memset(filter, 0, sizeof(*filter));
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
entry = 0;
for (j = 0; j < 32; j++)
@@ -81,7 +80,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
@@ -127,7 +125,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 0416c5b..c9cdb2a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -936,7 +936,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
if (err)
goto err_out_free_mtt;
- memset(eq_context, 0, sizeof *eq_context);
eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
MLX4_EQ_STATE_ARMED);
eq_context->log_eq_size = ilog2(eq->nent);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c3e70bc..fda2667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,6 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
-
MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
@@ -967,7 +965,6 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
pages = mailbox->buf;
for (mlx4_icm_first(icm, &iter);
@@ -1316,8 +1313,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, INIT_HCA_IN_SIZE);
-
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
@@ -1616,8 +1611,6 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, INIT_PORT_IN_SIZE);
-
flags = 0;
flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 70f0213..acf9d5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -506,7 +506,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
goto out_list;
}
mgm = mailbox->buf;
- memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
@@ -857,7 +856,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 63391a1..b3ee9ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -480,9 +480,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
goto err_table;
}
mpt_entry = mailbox->buf;
-
- memset(mpt_entry, 0, sizeof *mpt_entry);
-
mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
MLX4_MPT_FLAG_REGION |
mr->access);
@@ -695,8 +692,6 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
}
mpt_entry = mailbox->buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
* off, thus creating a memory window and not a memory region.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index caaa154..97d342f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -469,8 +469,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
inbuf = inmailbox->buf;
outbuf = outmailbox->buf;
- memset(inbuf, 0, 256);
- memset(outbuf, 0, 256);
inbuf[0] = 1;
inbuf[1] = 1;
inbuf[2] = 1;
@@ -653,8 +651,6 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, 256);
-
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
@@ -692,8 +688,6 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
context->flags = SET_PORT_GEN_ALL_VALID;
context->mtu = cpu_to_be16(mtu);
context->pptx = (pptx * (!pfctx)) << 7;
@@ -727,8 +721,6 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
context->base_qpn = cpu_to_be32(base_qpn);
context->n_mac = dev->caps.log_num_macs;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
@@ -761,8 +753,6 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
for (i = 0; i < MLX4_NUM_UP; i += 2)
context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
@@ -788,7 +778,6 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
for (i = 0; i < MLX4_NUM_TC; i++) {
struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9e08e35..8fdf237 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -189,8 +189,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
}
srq_context = mailbox->buf;
- memset(srq_context, 0, sizeof *srq_context);
-
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
srq->srqn);
srq_context->logstride = srq->wqe_shift - 4;
--
1.8.3.4
* [PATCH net-next 3/6] net/mlx4_core: Add immediate activate for VGT->VST->VGT
From: Amir Vadai @ 2013-11-07 10:19 UTC
To: David S. Miller
Cc: netdev, Amir Vadai, Yevgeny Petrilin, Or Gerlitz, Rony Efraim,
Jack Morgenstein
From: Rony Efraim <ronye@mellanox.com>
Allow immediate activation of VGT->VST and VST->VGT transitions in
mlx4_master_immediate_activate_vlan_qos(), without the need to rebind.
Also, in struct res_qp, add the qp parameters (vlan_index, fvl,
vlan_control, ...) to the saved set, in order to restore them when
moving back to VGT.
- Clear at mlx4_RST2INIT_QP_wrapper()
- Save at mlx4_INIT2RTR_QP_wrapper()
- Restore at mlx4_vf_immed_vlan_work_handler()
Update mlx4_vf_immed_vlan_work_handler() to support VGT.
Signed-off-by: Rony Efraim <ronye@mellanox.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Reviewed-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
---
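[Note: condensed, the per-QP decision the work handler now makes (sketch,
not part of the diff; "pri_path" below stands for
&upd_context->qp_context.pri_path as in the resource_tracker.c hunk):

if (work->vlan_id == MLX4_VGT) {
	/* VST -> VGT: put back the VF's own settings, saved at INIT2RTR */
	pri_path->vlan_control = qp->vlan_control;
	pri_path->vlan_index = qp->vlan_index;
	pri_path->fl = qp->pri_path_fl;
	pri_path->feup = qp->feup;
	pri_path->sched_queue = qp->sched_queue;
} else {
	/* VGT -> VST: force the admin vlan and qos onto the QP */
	pri_path->vlan_control = vlan_control;
	pri_path->vlan_index = work->vlan_ix;
	pri_path->fl = qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
	pri_path->feup = qp->feup | MLX4_FEUP_FORCE_ETH_UP |
			 MLX4_FVL_FORCE_ETH_VLAN;
	pri_path->sched_queue = (qp->sched_queue & 0xC7) |
				((work->qos & 0x7) << 3);
}

The restore branch is why RST2INIT must clear and INIT2RTR must save the
fields listed above.]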
drivers/net/ethernet/mellanox/mlx4/cmd.c | 34 ++++------
.../net/ethernet/mellanox/mlx4/resource_tracker.c | 79 ++++++++++++++++++----
2 files changed, 81 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 7207dcd..1e9970d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1539,11 +1539,6 @@ out:
return ret;
}
-static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
-{
- return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
-}
-
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
@@ -1553,7 +1548,6 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
struct mlx4_dev *dev = &(priv->dev);
int err;
int admin_vlan_ix = NO_INDX;
- enum mlx4_vlan_transition vlan_trans;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
@@ -1563,12 +1557,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.link_state == vp_admin->link_state)
return 0;
- vlan_trans = calculate_transition(vp_oper->state.default_vlan,
- vp_admin->default_vlan);
-
if (!(priv->mfunc.master.slave_state[slave].active &&
- dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
- vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
+ dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
/* even if the UPDATE_QP command isn't supported, we still want
* to set this VF link according to the admin directive
*/
@@ -1586,15 +1576,19 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
return -ENOMEM;
if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
- err = __mlx4_register_vlan(&priv->dev, port,
- vp_admin->default_vlan,
- &admin_vlan_ix);
- if (err) {
- kfree(work);
- mlx4_warn((&priv->dev),
- "No vlan resources slave %d, port %d\n",
- slave, port);
- return err;
+ if (MLX4_VGT != vp_admin->default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan,
+ &admin_vlan_ix);
+ if (err) {
+ kfree(work);
+ mlx4_warn((&priv->dev),
+ "No vlan resources slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ } else {
+ admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
mlx4_dbg((&(priv->dev)),
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b1603e2..2f3f2bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -110,7 +110,14 @@ struct res_qp {
int local_qpn;
atomic_t ref_count;
u32 qpc_flags;
+ /* saved qp params before VST enforcement in order to restore on VGT */
u8 sched_queue;
+ __be32 param3;
+ u8 vlan_control;
+ u8 fvl_rx;
+ u8 pri_path_fl;
+ u8 vlan_index;
+ u8 feup;
};
enum res_mtt_states {
@@ -2568,6 +2575,12 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
return err;
qp->local_qpn = local_qpn;
qp->sched_queue = 0;
+ qp->param3 = 0;
+ qp->vlan_control = 0;
+ qp->fvl_rx = 0;
+ qp->pri_path_fl = 0;
+ qp->vlan_index = 0;
+ qp->feup = 0;
qp->qpc_flags = be32_to_cpu(qpc->flags);
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -3294,6 +3307,12 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
+ __be32 orig_param3 = qpc->param3;
+ u8 orig_vlan_control = qpc->pri_path.vlan_control;
+ u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+ u8 orig_pri_path_fl = qpc->pri_path.fl;
+ u8 orig_vlan_index = qpc->pri_path.vlan_index;
+ u8 orig_feup = qpc->pri_path.feup;
err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
@@ -3321,9 +3340,15 @@ out:
* essentially the QOS value provided by the VF. This will be useful
* if we allow dynamic changes from VST back to VGT
*/
- if (!err)
+ if (!err) {
qp->sched_queue = orig_sched_queue;
-
+ qp->param3 = orig_param3;
+ qp->vlan_control = orig_vlan_control;
+ qp->fvl_rx = orig_fvl_rx;
+ qp->pri_path_fl = orig_pri_path_fl;
+ qp->vlan_index = orig_vlan_index;
+ qp->feup = orig_feup;
+ }
put_res(dev, slave, qpn, RES_QP);
return err;
}
@@ -4437,13 +4462,20 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
&tracker->slave_list[work->slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
- u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
+ u64 qp_path_mask_vlan_ctrl =
+ ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
+
+ u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
int err;
@@ -4475,9 +4507,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
- upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
- upd_context->qp_context.pri_path.vlan_control = vlan_control;
- upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4495,10 +4525,35 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
spin_lock_irq(mlx4_tlock(dev));
continue;
}
- upd_context->qp_context.pri_path.sched_queue =
- qp->sched_queue & 0xC7;
- upd_context->qp_context.pri_path.sched_queue |=
- ((work->qos & 0x7) << 3);
+ if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
+ upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
+ else
+ upd_context->primary_addr_path_mask =
+ cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
+ if (work->vlan_id == MLX4_VGT) {
+ upd_context->qp_context.param3 = qp->param3;
+ upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
+ upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
+ upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
+ upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
+ upd_context->qp_context.pri_path.feup = qp->feup;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue;
+ } else {
+ upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
+ upd_context->qp_context.pri_path.vlan_control = vlan_control;
+ upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_context.pri_path.fvl_rx =
+ qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.fl =
+ qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ upd_context->qp_context.pri_path.feup =
+ qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue & 0xC7;
+ upd_context->qp_context.pri_path.sched_queue |=
+ ((work->qos & 0x7) << 3);
+ }
err = mlx4_cmd(dev, mailbox->dma,
qp->local_qpn & 0xffffff,
--
1.8.3.4
* [PATCH net-next 4/6] net/mlx4_en: Datapath resources allocated dynamically
From: Amir Vadai @ 2013-11-07 10:19 UTC
To: David S. Miller
Cc: netdev, Amir Vadai, Yevgeny Petrilin, Or Gerlitz,
Eugenia Emantayev
From: Eugenia Emantayev <eugenia@mellanox.com>
Currently all TX/RX rings and completion queues are part of the
netdev priv structure and are allocated statically. This patch
will change the priv to hold only arrays of pointers and therefore
all TX/RX rings and completion queues will be allocated
dynamically. This is in preparation for NUMA aware allocations.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.com>
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Reviewed-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
---
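[Note: the whole conversion follows one pattern (sketch, not part of the
diff; "ring" is a stand-in type): create() takes a pointer-to-pointer,
allocates the object itself, and publishes it only on success, so that
destroy() can free the object and NULL the caller's slot:

int mlx4_en_create_ring(struct mlx4_en_priv *priv, struct ring **pring)
{
	struct ring *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... HW queue setup; goto-based unwind kfree()s on failure ... */

	*pring = ring;
	return 0;
}

void mlx4_en_destroy_ring(struct mlx4_en_priv *priv, struct ring **pring)
{
	struct ring *ring = *pring;

	/* ... release HW resources ... */

	kfree(ring);
	*pring = NULL;
}

Allocating each ring/CQ as its own object is what later allows placing it
on the preferred NUMA node (patch 6/6).]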
drivers/net/ethernet/mellanox/mlx4/en_cq.c | 34 ++++++---
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 36 +++++-----
drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 87 ++++++++++++++----------
drivers/net/ethernet/mellanox/mlx4/en_port.c | 14 ++--
drivers/net/ethernet/mellanox/mlx4/en_rx.c | 57 ++++++++++------
drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 2 +-
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 32 ++++++---
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 27 ++++----
8 files changed, 178 insertions(+), 111 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d504..d203f11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,12 +44,19 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
- struct mlx4_en_cq *cq,
+ struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
int err;
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ en_err(priv, "Failed to allocate CQ structure\n");
+ return -ENOMEM;
+ }
+
cq->size = entries;
cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
@@ -60,14 +67,22 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
if (err)
- return err;
+ goto err_cq;
err = mlx4_en_map_buffer(&cq->wqres.buf);
if (err)
- mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- else
- cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+ goto err_res;
+
+ cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+ *pcq = cq;
+ return 0;
+
+err_res:
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+ kfree(cq);
+ *pcq = NULL;
return err;
}
@@ -117,12 +132,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
struct mlx4_en_cq *rx_cq;
cq_idx = cq_idx % priv->rx_ring_num;
- rx_cq = &priv->rx_cq[cq_idx];
+ rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
}
if (!cq->is_tx)
- cq->size = priv->rx_ring[cq->ring].actual_size;
+ cq->size = priv->rx_ring[cq->ring]->actual_size;
if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
(!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +161,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
return 0;
}
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq = *pcq;
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +173,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
+ kfree(cq);
+ *pcq = NULL;
}
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0c75098..0596f9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
int err = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i].moder_cnt = priv->tx_frames;
- priv->tx_cq[i].moder_time = priv->tx_usecs;
+ priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+ priv->tx_cq[i]->moder_time = priv->tx_usecs;
if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+ err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
if (err)
return err;
}
@@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
return 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i].moder_cnt = priv->rx_frames;
- priv->rx_cq[i].moder_time = priv->rx_usecs;
+ priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+ priv->rx_cq[i]->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
if (err)
return err;
}
@@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
}
}
for (i = 0; i < priv->tx_ring_num; i++) {
- data[index++] = priv->tx_ring[i].packets;
- data[index++] = priv->tx_ring[i].bytes;
+ data[index++] = priv->tx_ring[i]->packets;
+ data[index++] = priv->tx_ring[i]->bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- data[index++] = priv->rx_ring[i].packets;
- data[index++] = priv->rx_ring[i].bytes;
+ data[index++] = priv->rx_ring[i]->packets;
+ data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
- data[index++] = priv->rx_ring[i].yields;
- data[index++] = priv->rx_ring[i].misses;
- data[index++] = priv->rx_ring[i].cleaned;
+ data[index++] = priv->rx_ring[i]->yields;
+ data[index++] = priv->rx_ring[i]->misses;
+ data[index++] = priv->rx_ring[i]->cleaned;
#endif
}
spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
- if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
- priv->rx_ring[0].size) &&
- tx_size == priv->tx_ring[0].size)
+ if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+ priv->rx_ring[0]->size) &&
+ tx_size == priv->tx_ring[0]->size)
return 0;
mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = priv->port_up ?
- priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
- param->tx_pending = priv->tx_ring[0].size;
+ priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+ param->tx_pending = priv->tx_ring[0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index cd61e26..f430788 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
int done;
if (!priv->port_up)
@@ -355,8 +355,7 @@ err:
return ret;
}
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
struct mlx4_en_filter *filter, *tmp;
LIST_HEAD(del_list);
@@ -1242,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
spin_lock_irqsave(&cq->lock, flags);
napi_synchronize(&cq->napi);
mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1264,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
- priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+ i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+ priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
}
priv->port_stats.tx_timeout++;
@@ -1305,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
cq->moder_cnt = priv->rx_frames;
cq->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1314,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
}
for (i = 0; i < priv->tx_ring_num; i++) {
- cq = &priv->tx_cq[i];
+ cq = priv->tx_cq[i];
cq->moder_cnt = priv->tx_frames;
cq->moder_time = priv->tx_usecs;
}
@@ -1348,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
for (ring = 0; ring < priv->rx_ring_num; ring++) {
spin_lock_bh(&priv->stats_lock);
- rx_packets = priv->rx_ring[ring].packets;
- rx_bytes = priv->rx_ring[ring].bytes;
+ rx_packets = priv->rx_ring[ring]->packets;
+ rx_bytes = priv->rx_ring[ring]->bytes;
spin_unlock_bh(&priv->stats_lock);
rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1378,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
if (moder_time != priv->last_moder_time[ring]) {
priv->last_moder_time[ring] = moder_time;
- cq = &priv->rx_cq[ring];
+ cq = priv->rx_cq[ring];
cq->moder_time = moder_time;
cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
@@ -1501,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
mlx4_en_cq_init_lock(cq);
@@ -1519,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
- priv->rx_ring[i].cqn = cq->mcq.cqn;
+ priv->rx_ring[i]->cqn = cq->mcq.cqn;
++rx_index;
}
@@ -1545,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
- cq = &priv->tx_cq[i];
+ cq = priv->tx_cq[i];
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
@@ -1561,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
cq->buf->wqe_index = cpu_to_be16(0xffff);
/* Configure ring */
- tx_ring = &priv->tx_ring[i];
+ tx_ring = priv->tx_ring[i];
err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
i / priv->num_tx_rings_p_up);
if (err) {
@@ -1631,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
tx_err:
while (tx_index--) {
- mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
- mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
@@ -1641,9 +1640,9 @@ mac_err:
mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
- mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+ mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
for (i = 0; i < priv->rx_ring_num; i++)
- mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
return err; /* need to close devices */
}
@@ -1739,13 +1738,13 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
- mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
- mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
}
msleep(10);
for (i = 0; i < priv->tx_ring_num; i++)
- mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+ mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
@@ -1757,7 +1756,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
- struct mlx4_en_cq *cq = &priv->rx_cq[i];
+ struct mlx4_en_cq *cq = priv->rx_cq[i];
local_bh_disable();
while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1768,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
msleep(1);
- mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
}
}
@@ -1806,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_ring[i].bytes = 0;
- priv->tx_ring[i].packets = 0;
- priv->tx_ring[i].tx_csum = 0;
+ priv->tx_ring[i]->bytes = 0;
+ priv->tx_ring[i]->packets = 0;
+ priv->tx_ring[i]->tx_csum = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_ring[i].bytes = 0;
- priv->rx_ring[i].packets = 0;
- priv->rx_ring[i].csum_ok = 0;
- priv->rx_ring[i].csum_none = 0;
+ priv->rx_ring[i]->bytes = 0;
+ priv->rx_ring[i]->packets = 0;
+ priv->rx_ring[i]->csum_ok = 0;
+ priv->rx_ring[i]->csum_none = 0;
}
}
@@ -1871,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
#endif
for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring[i].tx_info)
+ if (priv->tx_ring && priv->tx_ring[i])
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq[i].buf)
+ if (priv->tx_cq && priv->tx_cq[i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- if (priv->rx_ring[i].rx_info)
+ if (priv->rx_ring[i])
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
priv->prof->rx_ring_size, priv->stride);
- if (priv->rx_cq[i].buf)
+ if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
@@ -1937,6 +1936,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
err:
en_err(priv, "Failed to allocate NIC resources\n");
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (priv->rx_ring[i])
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+ prof->rx_ring_size,
+ priv->stride);
+ if (priv->rx_cq[i])
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ }
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (priv->tx_ring[i])
+ mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+ if (priv->tx_cq[i])
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ }
return -ENOMEM;
}
@@ -2230,13 +2243,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+ priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_ring) {
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5f8535e..dae1a1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -140,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- stats->rx_packets += priv->rx_ring[i].packets;
- stats->rx_bytes += priv->rx_ring[i].bytes;
- priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
- priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+ stats->rx_packets += priv->rx_ring[i]->packets;
+ stats->rx_bytes += priv->rx_ring[i]->bytes;
+ priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+ priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
priv->port_stats.tx_chksum_offload = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
- stats->tx_packets += priv->tx_ring[i].packets;
- stats->tx_bytes += priv->tx_ring[i].bytes;
- priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+ stats->tx_packets += priv->tx_ring[i]->packets;
+ stats->tx_bytes += priv->tx_ring[i]->bytes;
+ priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index afe2efa..1c45f88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -264,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
@@ -289,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
reduce_rings:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
while (ring->actual_size > new_size) {
ring->actual_size--;
ring->prod--;
@@ -319,12 +319,20 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed to allocate RX ring structure\n");
+ return -ENOMEM;
+ }
+
ring->prod = 0;
ring->cons = 0;
ring->size = size;
@@ -336,8 +344,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vmalloc(tmp);
- if (!ring->rx_info)
- return -ENOMEM;
+ if (!ring->rx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
@@ -345,7 +355,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
if (err)
- goto err_ring;
+ goto err_info;
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
@@ -356,13 +366,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
+ *pring = ring;
return 0;
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
+err_ring:
+ kfree(ring);
+ *pring = NULL;
+
return err;
}
@@ -376,12 +391,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
DS_SIZE * priv->num_frags);
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
ring->prod = 0;
ring->cons = 0;
ring->actual_size = 0;
- ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+ ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
if (ring->stride <= TXBB_SIZE)
@@ -412,7 +427,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
goto err_buffers;
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
ring->size_mask = ring->actual_size - 1;
mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +437,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err_buffers:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
- mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+ mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
- if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
- priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+ if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+ priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+ mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring = *pring;
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
+ kfree(ring);
+ *pring = NULL;
#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv, ring);
+ mlx4_en_cleanup_filters(priv);
#endif
}
@@ -592,7 +611,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_cqe *cqe;
- struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
@@ -991,7 +1010,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
for (i = 0; i < priv->rx_ring_num; i++) {
qpn = rss_map->base_qpn + i;
- err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+ err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
&rss_map->state[i],
&rss_map->qps[i]);
if (err)
@@ -1008,7 +1027,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}
rss_map->indir_qp.event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
- priv->rx_ring[0].cqn, -1, &context);
+ priv->rx_ring[0]->cqn, -1, &context);
if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2448f0d..4062669 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -156,7 +156,7 @@ retry_tx:
* since we turned the carrier off */
msleep(200);
for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
- tx_ring = &priv->tx_ring[i];
+ tx_ring = priv->tx_ring[i];
if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
goto retry_tx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0698c82..d4e4cf3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -54,13 +54,20 @@ module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring, int qpn, u32 size,
+ struct mlx4_en_tx_ring **pring, int qpn, u32 size,
u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring;
int tmp;
int err;
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed allocating TX ring\n");
+ return -ENOMEM;
+ }
+
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
@@ -69,8 +76,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = vmalloc(tmp);
- if (!ring->tx_info)
- return -ENOMEM;
+ if (!ring->tx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
@@ -78,7 +87,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
if (!ring->bounce_buf) {
err = -ENOMEM;
- goto err_tx;
+ goto err_info;
}
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
@@ -120,6 +129,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ *pring = ring;
return 0;
err_map:
@@ -129,16 +139,20 @@ err_hwq_res:
err_bounce:
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
-err_tx:
+err_info:
vfree(ring->tx_info);
ring->tx_info = NULL;
+err_ring:
+ kfree(ring);
+ *pring = NULL;
return err;
}
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring)
+ struct mlx4_en_tx_ring **pring)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring = *pring;
en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
if (ring->bf_enabled)
@@ -151,6 +165,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
ring->bounce_buf = NULL;
vfree(ring->tx_info);
ring->tx_info = NULL;
+ kfree(ring);
+ *pring = NULL;
}
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +346,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
- struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
struct mlx4_cqe *cqe;
u16 index;
u16 new_index, ring_index, stamp_index;
@@ -622,7 +638,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
tx_ind = skb->queue_mapping;
- ring = &priv->tx_ring[tx_ind];
+ ring = priv->tx_ring[tx_ind];
if (vlan_tx_tag_present(skb))
vlan_tag = vlan_tx_tag_get(skb);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index bf06e36..b2547ae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -530,10 +530,10 @@ struct mlx4_en_priv {
u16 num_frags;
u16 log_rx_info;
- struct mlx4_en_tx_ring *tx_ring;
- struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
- struct mlx4_en_cq *tx_cq;
- struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_en_tx_ring **tx_ring;
+ struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+ struct mlx4_en_cq **tx_cq;
+ struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
struct work_struct watchdog_task;
@@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
if ((cq->state & MLX4_CQ_LOCKED)) {
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
rc = false;
@@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring,
int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq, int user_prio);
@@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
#ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST 5
--
1.8.3.4
* [PATCH net-next 5/6] net/mlx4_core: ICM pages are allocated on device NUMA node
From: Amir Vadai @ 2013-11-07 10:19 UTC
To: David S. Miller
Cc: netdev, Amir Vadai, Yevgeny Petrilin, Or Gerlitz,
Eugenia Emantayev
From: Eugenia Emantayev <eugenia@mellanox.com>
From now on, ICM pages will be allocated on the device NUMA node.
This is done to optimize FW/HW access to host memory.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.com>
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Reviewed-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
---
drivers/net/ethernet/mellanox/mlx4/icm.c | 42 ++++++++++++++++++++++---------
drivers/net/ethernet/mellanox/mlx4/main.c | 1 +
include/linux/mlx4/device.h | 1 +
3 files changed, 32 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 31d0264..5fbf492 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
kfree(icm);
}
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+ gfp_t gfp_mask, int node)
{
struct page *page;
- page = alloc_pages(gfp_mask, order);
- if (!page)
- return -ENOMEM;
+ page = alloc_pages_node(node, gfp_mask, order);
+ if (!page) {
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return -ENOMEM;
+ }
sg_set_page(mem, page, PAGE_SIZE << order, 0);
return 0;
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
/* We use sg_set_buf for coherent allocs, which assumes low memory */
BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
- icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!icm)
- return NULL;
+ icm = kmalloc_node(sizeof(*icm),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+ dev->numa_node);
+ if (!icm) {
+ icm = kmalloc(sizeof(*icm),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!icm)
+ return NULL;
+ }
icm->refcount = 0;
INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
while (npages > 0) {
if (!chunk) {
- chunk = kmalloc(sizeof *chunk,
- gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!chunk)
- goto fail;
+ chunk = kmalloc_node(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM |
+ __GFP_NOWARN),
+ dev->numa_node);
+ if (!chunk) {
+ chunk = kmalloc(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM |
+ __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+ }
sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
chunk->npages = 0;
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
cur_order, gfp_mask);
else
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
- cur_order, gfp_mask);
+ cur_order, gfp_mask,
+ dev->numa_node);
if (ret) {
if (--cur_order < 0)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7d2628d..5789ea2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2191,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ dev->numa_node = dev_to_node(&pdev->dev);
/* Detect if this device is a virtual function */
if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
/* When acting as pf, we normally skip vfs unless explicitly
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6f5927..4cf0b01 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -662,6 +662,7 @@ struct mlx4_dev {
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
int num_vfs;
+ int numa_node;
int oper_log_mgm_entry_size;
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
--
1.8.3.4
* [PATCH net-next 6/6] net/mlx4_en: Datapath structures are allocated per NUMA node
2013-11-07 10:19 [PATCH net-next 0/6] net/mlx4: Mellanox driver update 07-11-2013 Amir Vadai
` (4 preceding siblings ...)
2013-11-07 10:19 ` [PATCH net-next 5/6] net/mlx4_core: ICM pages are allocated on device NUMA node Amir Vadai
@ 2013-11-07 10:19 ` Amir Vadai
2013-11-08 0:23 ` [PATCH net-next 0/6] net/mlx4: Mellanox driver update 07-11-2013 David Miller
6 siblings, 0 replies; 8+ messages in thread
From: Amir Vadai @ 2013-11-07 10:19 UTC (permalink / raw)
To: David S. Miller
Cc: netdev, Amir Vadai, Yevgeny Petrilin, Or Gerlitz,
Eugenia Emantayev
From: Eugenia Emantayev <eugenia@mellanox.com>
For each RX/TX ring and its CQ, allocation is done on a NUMA node that
corresponds to the core that the data structure should operate on.
The assumption is that the core number is reflected by the ring index.
The affected allocations are the ring/CQ data structures,
the TX/RX info and the shared HW/SW buffer.
For TX rings, each core has rings for all user priorities (UPs).
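The recurring pattern in the diffs below is: derive the node from the ring index, allocate there with kzalloc_node()/vmalloc_node(), and fall back to a plain allocation if the node-local attempt fails. A condensed sketch (alloc_ring_struct() is an illustrative helper, not a function added by the patch):

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Ring i is assumed to be serviced by core (i % online cores), so
 * its control structure is placed on that core's NUMA node. If the
 * node-local allocation fails, fall back to any node rather than
 * failing ring creation.
 */
static void *alloc_ring_struct(size_t size, int ring_index)
{
        int node = cpu_to_node(ring_index % num_online_cpus());
        void *ring;

        ring = kzalloc_node(size, GFP_KERNEL, node);
        if (!ring)
                ring = kzalloc(size, GFP_KERNEL);
        return ring;
}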
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.com>
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Reviewed-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
---
drivers/net/ethernet/mellanox/mlx4/en_cq.c | 17 ++++++++++---
drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 12 ++++++---
drivers/net/ethernet/mellanox/mlx4/en_rx.c | 23 +++++++++++------
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 34 +++++++++++++++++---------
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 6 ++---
drivers/net/ethernet/mellanox/mlx4/pd.c | 11 ++++++---
include/linux/mlx4/device.h | 2 +-
7 files changed, 71 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index d203f11..3a098cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -45,16 +45,20 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
struct mlx4_en_cq **pcq,
- int entries, int ring, enum cq_type mode)
+ int entries, int ring, enum cq_type mode,
+ int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
int err;
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
if (!cq) {
- en_err(priv, "Failed to allocate CQ structure\n");
- return -ENOMEM;
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ en_err(priv, "Failed to allocate CQ structure\n");
+ return -ENOMEM;
+ }
}
cq->size = entries;
@@ -64,8 +68,13 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->is_tx = mode;
spin_lock_init(&cq->lock);
+ /* Allocate HW buffers on provided NUMA node.
+ * dev->numa_node is used in mtt range allocation flow.
+ */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_cq;
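The set_dev_node() bracketing above is how the shared HW/SW buffer is steered: mlx4_alloc_hwq_res() eventually reaches dma_alloc_coherent(), which takes its NUMA hint from the struct device, so the device is temporarily re-pointed at the ring's node and then restored. A standalone sketch of the idiom (alloc_coherent_on_node() is illustrative, not part of the patch; it is only safe where, as here, resource creation is single-threaded):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Temporarily re-point the device at 'node' so the coherent DMA
 * allocation lands there, then restore the device's own node so
 * later allocations are unaffected.
 */
static void *alloc_coherent_on_node(struct pci_dev *pdev, size_t size,
                                    dma_addr_t *dma, int node)
{
        int orig_node = dev_to_node(&pdev->dev);
        void *buf;

        set_dev_node(&pdev->dev, node);
        buf = dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
        set_dev_node(&pdev->dev, orig_node);
        return buf;
}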
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f430788..e72d8a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1895,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
struct mlx4_en_port_profile *prof = priv->prof;
int i;
int err;
+ int node;
err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
if (err) {
@@ -1904,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
/* Create tx Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
+ node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
- prof->tx_ring_size, i, TX))
+ prof->tx_ring_size, i, TX, node))
goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE))
+ prof->tx_ring_size, TXBB_SIZE, node))
goto err;
}
/* Create rx Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
+ node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
- prof->rx_ring_size, i, RX))
+ prof->rx_ring_size, i, RX, node))
goto err;
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
- prof->rx_ring_size, priv->stride))
+ prof->rx_ring_size, priv->stride,
+ node))
goto err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 1c45f88..07a1d0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -320,17 +320,20 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride)
+ u32 size, u16 stride, int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
- en_err(priv, "Failed to allocate RX ring structure\n");
- return -ENOMEM;
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed to allocate RX ring structure\n");
+ return -ENOMEM;
+ }
}
ring->prod = 0;
@@ -343,17 +346,23 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vmalloc(tmp);
+ ring->rx_info = vmalloc_node(tmp, node);
if (!ring->rx_info) {
- err = -ENOMEM;
- goto err_ring;
+ ring->rx_info = vmalloc(tmp);
+ if (!ring->rx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
}
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
+ /* Allocate HW buffers on provided NUMA node */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_info;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d4e4cf3..f54ebd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -55,17 +55,20 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
- u16 stride)
+ u16 stride, int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
int tmp;
int err;
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
- en_err(priv, "Failed allocating TX ring\n");
- return -ENOMEM;
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed allocating TX ring\n");
+ return -ENOMEM;
+ }
}
ring->size = size;
@@ -75,24 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
inline_thold = min(inline_thold, MAX_INLINE);
tmp = size * sizeof(struct mlx4_en_tx_info);
- ring->tx_info = vmalloc(tmp);
+ ring->tx_info = vmalloc_node(tmp, node);
if (!ring->tx_info) {
- err = -ENOMEM;
- goto err_ring;
+ ring->tx_info = vmalloc(tmp);
+ if (!ring->tx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
}
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
- ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
if (!ring->bounce_buf) {
- err = -ENOMEM;
- goto err_info;
+ ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ if (!ring->bounce_buf) {
+ err = -ENOMEM;
+ goto err_info;
+ }
}
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+ /* Allocate HW buffers on provided NUMA node */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
@@ -118,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
}
ring->qp.event = mlx4_en_sqp_event;
- err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+ err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
en_dbg(DRV, priv, "working without blueflame (%d)", err);
ring->bf.uar = &mdev->priv_uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b2547ae..f3758de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -705,7 +705,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
- int entries, int ring, enum cq_type mode);
+ int entries, int ring, enum cq_type mode, int node);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx);
@@ -719,7 +719,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
- int qpn, u32 size, u16 stride);
+ int qpn, u32 size, u16 stride, int node);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -730,7 +730,7 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride);
+ u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 00f223a..84cfb40 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_uar *uar;
@@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
err = -ENOMEM;
goto out;
}
- uar = kmalloc(sizeof *uar, GFP_KERNEL);
+ uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
if (!uar) {
- err = -ENOMEM;
- goto out;
+ uar = kmalloc(sizeof(*uar), GFP_KERNEL);
+ if (!uar) {
+ err = -ENOMEM;
+ goto out;
+ }
}
err = mlx4_uar_alloc(dev, uar);
if (err)
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 4cf0b01..7d3a523 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -835,7 +835,7 @@ void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
--
1.8.3.4
* Re: [PATCH net-next 0/6] net/mlx4: Mellanox driver update 07-11-2013
2013-11-07 10:19 [PATCH net-next 0/6] net/mlx4: Mellanox driver update 07-11-2013 Amir Vadai
` (5 preceding siblings ...)
2013-11-07 10:19 ` [PATCH net-next 6/6] net/mlx4_en: Datapath structures are allocated per " Amir Vadai
@ 2013-11-08 0:23 ` David Miller
6 siblings, 0 replies; 8+ messages in thread
From: David Miller @ 2013-11-08 0:23 UTC (permalink / raw)
To: amirv; +Cc: netdev, yevgenyp, ogerlitz
From: Amir Vadai <amirv@mellanox.com>
Date: Thu, 7 Nov 2013 12:19:48 +0200
> This patchset contains some enhancements and bug fixes for the mlx4_* drivers.
> Patchset was applied and tested against commit: "9bb8ca8 virtio-net: switch to
> use XPS to choose txq"
Series applied, thanks Amir.