* [jkirsher/next-queue PATCH v2 7/7] net: allow fallback function to pass netdev
2018-06-12 15:18 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
@ 2018-06-12 15:19 ` Alexander Duyck
0 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-06-12 15:19 UTC (permalink / raw)
To: intel-wired-lan, jeffrey.t.kirsher, netdev
For most of these calls we can just pass NULL through to the fallback
function as the sb_dev. The only cases where we cannot are those where we
might be dealing with either an upper device or a driver that has
configured things to support an sb_dev itself.
The only driver with any significant change in this patch set should be
ixgbe, as we can drop the redundant functionality that existed in both the
ndo_select_queue function and the fallback function that was passed through
to us.
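For reference, a minimal sketch (hypothetical foo_select_queue, not part of
this patch) of what a typical call site looks like once the fallback takes
three arguments:
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* No subordinate device handling in this driver, so pass NULL */
	return fallback(dev, skb, NULL);
}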
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +-
drivers/net/ethernet/broadcom/bcmsysport.c | 4 ++--
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 ++-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +-
drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++--
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4 ++--
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +-
drivers/net/hyperv/netvsc_drv.c | 2 +-
drivers/net/net_failover.c | 2 +-
drivers/net/xen-netback/interface.c | 2 +-
include/linux/netdevice.h | 3 ++-
net/core/dev.c | 12 +++---------
net/packet/af_packet.c | 7 ++++---
14 files changed, 24 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e3befb1..c673ac2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2224,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb);
+ qid = fallback(dev, skb, NULL);
return qid;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 32f548e..eb890c4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
return tx_ring->index;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 969dcc9..7a1b99f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1928,7 +1928,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8de3039..380931d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -972,7 +972,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index c36a231..8327254 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2033,7 +2033,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb);
+ return fallback(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5d9867e..eef64d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8248,11 +8248,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
/* fall through */
default:
- return fallback(dev, skb);
+ return fallback(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index df29966..1857ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -695,9 +695,9 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
- return fallback(dev, skb) % rings_p_up;
+ return fallback(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 0119e86..88c0c85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -115,7 +115,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
select_queue_fallback_t fallback)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb);
+ int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0a01572..5bc32e7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -344,7 +344,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
sb_dev, fallback);
else
- txq = fallback(vf_netdev, skb);
+ txq = fallback(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index b2dc2e7..6f3d143 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -131,7 +131,7 @@ static u16 net_failover_select_queue(struct net_device *dev,
txq = ops->ndo_select_queue(primary_dev, skb,
sb_dev, fallback);
else
- txq = fallback(primary_dev, skb);
+ txq = fallback(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 19c4c58..92274c2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -155,7 +155,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int size = vif->hash.size;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 94d8f9b..db02e5f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -782,7 +782,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
enum tc_setup_type {
TC_SETUP_QDISC_MQPRIO,
diff --git a/net/core/dev.c b/net/core/dev.c
index a78000a..5abf7ee 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3521,8 +3521,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
@@ -3547,12 +3547,6 @@ static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
return queue_index;
}
-static u16 __netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb)
-{
- return ___netdev_pick_tx(dev, skb, NULL);
-}
-
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev)
@@ -3573,7 +3567,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
__netdev_pick_tx);
else
- queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
+ queue_index = __netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 905f7cd..e24d9b4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
return po->xmit == packet_direct_xmit;
}
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
- return dev_pick_tx_cpu_id(dev, skb, NULL, NULL);
+ return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
__packet_pick_tx_queue);
queue_index = netdev_cap_txqueue(dev, queue_index);
} else {
- queue_index = __packet_pick_tx_queue(dev, skb);
+ queue_index = __packet_pick_tx_queue(dev, skb, NULL);
}
return queue_index;
* [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue
@ 2018-07-09 16:19 Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 1/7] net-sysfs: Drop support for XPS and traffic_class on single queue device Alexander Duyck
` (6 more replies)
0 siblings, 7 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:19 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
This patch series is meant to allow support for the L2 forward offload,
aka MACVLAN offload, without the need for using ndo_select_queue.
The existing solution requires that we use ndo_select_queue in the
transmit path if we want to associate specific Tx queues with a given
MACVLAN interface. In order to get away from this we need to repurpose the
tc_to_txq array and XPS pointer of the MACVLAN interface and use those as
a means of accessing the queues on the lower device. As a result we cannot
offload a device that is configured as multiqueue; however, it doesn't
really make sense to configure a macvlan interface as multiqueue anyway,
since it doesn't have a qdisc of its own in the first place.
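As a rough sketch of the mechanism (lowerdev, vdev, pool, qcount and baseq
are hypothetical; the helpers are the ones introduced later in this
series), a lower device hands a block of its Tx queues to a single-queue
macvlan roughly like this:
	/* sketch only, error handling omitted */
	netdev_set_sb_channel(vdev, pool);
	netdev_bind_sb_channel_queue(lowerdev, vdev,
				     0 /* tc */, qcount, baseq /* offset */);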
I am submitting this as an RFC to the netdev mailing list, and officially
submitting it for testing to Jeff Kirsher's next-queue tree in order to
validate the ixgbe-specific bits.
The big changes in this set are:
Allow lower device to update tc_to_txq and XPS map of offloaded MACVLAN
Disable XPS for single queue devices
Replace accel_priv with sb_dev in ndo_select_queue
Add sb_dev parameter to fallback function for ndo_select_queue
Consolidated ndo_select_queue functions that appeared to be duplicates
v2:
Updated the patch set to rebase the netdev_pick_tx logic on top of the
recent Rx symmetric queue changes.
---
Alexander Duyck (7):
net-sysfs: Drop support for XPS and traffic_class on single queue device
net: Add support for subordinate device traffic classes
ixgbe: Add code to populate and use macvlan tc to Tx queue map
net: Add support for subordinate traffic classes to netdev_pick_tx
net: Add generic ndo_select_queue functions
net: allow ndo_select_queue to pass netdev
net: allow fallback function to pass netdev
drivers/infiniband/hw/hfi1/vnic_main.c | 2
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c | 4 -
drivers/net/bonding/bond_main.c | 3
drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 -
drivers/net/ethernet/broadcom/bcmsysport.c | 6 -
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6 +
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 -
drivers/net/ethernet/hisilicon/hns/hns_enet.c | 5 -
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 62 ++++++--
drivers/net/ethernet/lantiq_etop.c | 10 -
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7 +
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
drivers/net/ethernet/mellanox/mlx5/core/en.h | 3
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 5 -
drivers/net/ethernet/renesas/ravb_main.c | 3
drivers/net/ethernet/sun/ldmvsw.c | 3
drivers/net/ethernet/sun/sunvnet.c | 3
drivers/net/ethernet/ti/netcp_core.c | 9 -
drivers/net/hyperv/netvsc_drv.c | 6 -
drivers/net/macvlan.c | 10 -
drivers/net/net_failover.c | 7 +
drivers/net/team/team.c | 3
drivers/net/tun.c | 3
drivers/net/wireless/marvell/mwifiex/main.c | 3
drivers/net/xen-netback/interface.c | 4 -
drivers/net/xen-netfront.c | 3
drivers/staging/netlogic/xlr_net.c | 9 -
drivers/staging/rtl8188eu/os_dep/os_intfs.c | 3
drivers/staging/rtl8723bs/os_dep/os_intfs.c | 7 -
include/linux/netdevice.h | 34 ++++-
net/core/dev.c | 157 ++++++++++++++++++---
net/core/net-sysfs.c | 36 ++++-
net/mac80211/iface.c | 4 -
net/packet/af_packet.c | 7 +
35 files changed, 312 insertions(+), 131 deletions(-)
* [jkirsher/next-queue PATCH v2 1/7] net-sysfs: Drop support for XPS and traffic_class on single queue device
2018-07-09 16:19 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
@ 2018-07-09 16:19 ` Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 2/7] net: Add support for subordinate device traffic classes Alexander Duyck
` (5 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:19 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
This patch makes it so that we do not report the traffic class or allow
XPS configuration on single queue devices. This is mostly to avoid
unnecessary complexity with changes I have planned that will repurpose the
otherwise unused tc_to_txq and XPS configuration on a single queue device
so that it can make use of a subset of the queues on an underlying device.
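For reference, netif_is_multiqueue() is essentially the check below, so
the new guards simply hide these attributes whenever the device has only a
single Tx queue (paraphrased for illustration):
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}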
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
net/core/net-sysfs.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f25ac5f..dce3ae0 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1047,9 +1047,14 @@ static ssize_t traffic_class_show(struct netdev_queue *queue,
char *buf)
{
struct net_device *dev = queue->dev;
- int index = get_netdev_queue_index(queue);
- int tc = netdev_txq_to_tc(dev, index);
+ int index;
+ int tc;
+ if (!netif_is_multiqueue(dev))
+ return -ENOENT;
+
+ index = get_netdev_queue_index(queue);
+ tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
@@ -1214,6 +1219,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
cpumask_var_t mask;
unsigned long index;
+ if (!netif_is_multiqueue(dev))
+ return -ENOENT;
+
index = get_netdev_queue_index(queue);
if (dev->num_tc) {
@@ -1260,6 +1268,9 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
cpumask_var_t mask;
int err;
+ if (!netif_is_multiqueue(dev))
+ return -ENOENT;
+
if (!capable(CAP_NET_ADMIN))
return -EPERM;
* [jkirsher/next-queue PATCH v2 2/7] net: Add support for subordinate device traffic classes
2018-07-09 16:19 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 1/7] net-sysfs: Drop support for XPS and traffic_class on single queue device Alexander Duyck
@ 2018-07-09 16:19 ` Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 3/7] ixgbe: Add code to populate and use macvlan tc to Tx queue map Alexander Duyck
` (4 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:19 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
This patch is meant to provide the basic tools needed to allow us to create
subordinate device traffic classes. The general idea here is to allow
subdividing the queues of a device into queue groups accessible through an
upper device such as a macvlan.
The idea here is to enforce that an upper device has to be a single queue
device, ideally with IFF_NO_QUEUE set. With that being the case we can
pretty much guarantee that the tc_to_txq mappings and XPS maps for the
upper device are unused. As such we can reuse those in order to support
subdividing the lower device and distributing its queues between the
subordinate devices.
In order to distinguish between a regular set of traffic classes and a
device carrying subordinate traffic classes, I changed num_tc from a u8 to
an s16 value and use the negative values to represent the subordinate pool
values. So starting at -1 and running to -32768 we can encode those as
pool values, while the existing values of 0 to 15 are maintained.
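A short worked example of the encoding (vdev is a hypothetical
single-queue upper device; the helpers are the ones added below):
	netdev_set_sb_channel(vdev, 2);	/* vdev->num_tc is now -2 */
	netdev_get_sb_channel(vdev);	/* max_t(int, -(-2), 0) == 2 */
	netdev_set_sb_channel(vdev, 0);	/* back to normal mode, num_tc == 0 */
	/* a regular device using netdev_set_num_tc() keeps num_tc in 0..15 */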
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
include/linux/netdevice.h | 16 ++++++++
net/core/dev.c | 89 +++++++++++++++++++++++++++++++++++++++++++++
net/core/net-sysfs.c | 21 ++++++++++-
3 files changed, 124 insertions(+), 2 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b683971..4648a9a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -575,6 +575,9 @@ struct netdev_queue {
* (/sys/class/net/DEV/Q/trans_timeout)
*/
unsigned long trans_timeout;
+
+ /* Subordinate device that the queue has been assigned to */
+ struct net_device *sb_dev;
/*
* write-mostly part
*/
@@ -1991,7 +1994,7 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
+ s16 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
@@ -2045,6 +2048,17 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+void netdev_unbind_sb_channel(struct net_device *dev,
+ struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+ struct net_device *sb_dev,
+ u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+ return max_t(int, -dev->num_tc, 0);
+}
+
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
diff --git a/net/core/dev.c b/net/core/dev.c
index 89825c1..cc1d6bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2067,11 +2067,13 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
int i;
+ /* walk through the TCs and see if it falls into any of them */
for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
if ((txq - tc->offset) < tc->count)
return i;
}
+ /* didn't find it, just return -1 to indicate no match */
return -1;
}
@@ -2260,7 +2262,14 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
unsigned int nr_ids;
if (dev->num_tc) {
+ /* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
+ if (num_tc < 0)
+ return -EINVAL;
+
+ /* If queue belongs to subordinate dev use its map */
+ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
@@ -2448,11 +2457,25 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
EXPORT_SYMBOL(netif_set_xps_queue);
#endif
+static void netdev_unbind_all_sb_channels(struct net_device *dev)
+{
+ struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+ /* Unbind any subordinate channels */
+ while (txq-- != &dev->_tx[0]) {
+ if (txq->sb_dev)
+ netdev_unbind_sb_channel(dev, txq->sb_dev);
+ }
+}
+
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, 0);
#endif
+ netdev_unbind_all_sb_channels(dev);
+
+ /* Reset TC configuration of device */
dev->num_tc = 0;
memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
@@ -2481,11 +2504,77 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, 0);
#endif
+ netdev_unbind_all_sb_channels(dev);
+
dev->num_tc = num_tc;
return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
+void netdev_unbind_sb_channel(struct net_device *dev,
+ struct net_device *sb_dev)
+{
+ struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+#ifdef CONFIG_XPS
+ netif_reset_xps_queues_gt(sb_dev, 0);
+#endif
+ memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
+ memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
+
+ while (txq-- != &dev->_tx[0]) {
+ if (txq->sb_dev == sb_dev)
+ txq->sb_dev = NULL;
+ }
+}
+EXPORT_SYMBOL(netdev_unbind_sb_channel);
+
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+ struct net_device *sb_dev,
+ u8 tc, u16 count, u16 offset)
+{
+ /* Make certain the sb_dev and dev are already configured */
+ if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
+ return -EINVAL;
+
+ /* We cannot hand out queues we don't have */
+ if ((offset + count) > dev->real_num_tx_queues)
+ return -EINVAL;
+
+ /* Record the mapping */
+ sb_dev->tc_to_txq[tc].count = count;
+ sb_dev->tc_to_txq[tc].offset = offset;
+
+ /* Provide a way for Tx queue to find the tc_to_txq map or
+ * XPS map for itself.
+ */
+ while (count--)
+ netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
+
+int netdev_set_sb_channel(struct net_device *dev, u16 channel)
+{
+ /* Do not use a multiqueue device to represent a subordinate channel */
+ if (netif_is_multiqueue(dev))
+ return -ENODEV;
+
+ /* We allow channels 1 - 32767 to be used for subordinate channels.
+ * Channel 0 is meant to be "native" mode and used only to represent
+ * the main root device. We allow writing 0 to reset the device back
+ * to normal mode after being used as a subordinate channel.
+ */
+ if (channel > S16_MAX)
+ return -EINVAL;
+
+ dev->num_tc = -channel;
+
+ return 0;
+}
+EXPORT_SYMBOL(netdev_set_sb_channel);
+
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index dce3ae0..91184d5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1054,11 +1054,23 @@ static ssize_t traffic_class_show(struct netdev_queue *queue,
return -ENOENT;
index = get_netdev_queue_index(queue);
+
+ /* If queue belongs to subordinate dev use its tc mapping */
+ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
- return sprintf(buf, "%u\n", tc);
+ /* We can report the traffic class one of two ways:
+ * Subordinate device traffic classes are reported with the traffic
+ * class first, and then the subordinate class so for example TC0 on
+ * subordinate device 2 will be reported as "0-2". If the queue
+ * belongs to the root device it will be reported with just the
+ * traffic class, so just "0" for TC 0 for example.
+ */
+ return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
+ sprintf(buf, "%u\n", tc);
}
#ifdef CONFIG_XPS
@@ -1225,7 +1237,14 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
index = get_netdev_queue_index(queue);
if (dev->num_tc) {
+ /* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
+ if (num_tc < 0)
+ return -EINVAL;
+
+ /* If queue belongs to subordinate dev use its map */
+ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
* [jkirsher/next-queue PATCH v2 3/7] ixgbe: Add code to populate and use macvlan tc to Tx queue map
2018-07-09 16:19 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 1/7] net-sysfs: Drop support for XPS and traffic_class on single queue device Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 2/7] net: Add support for subordinate device traffic classes Alexander Duyck
@ 2018-07-09 16:19 ` Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 4/7] net: Add support for subordinate traffic classes to netdev_pick_tx Alexander Duyck
` (3 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:19 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
This patch makes it so that we use the tc_to_txq mapping in the macvlan
device in order to select the Tx queue for outgoing packets.
The idea here is to move away from using ixgbe_select_queue and to come up
with a generic way to make this work for devices going forward. By
encoding this information in the netdev it can become a generic solution
for similar setups.
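Condensed from the ixgbe_select_queue() change below, the selection for an
offloaded macvlan reduces to hashing within the tc_to_txq block recorded in
the macvlan netdev (vdev):
	u8 tc = netdev_get_num_tc(dev) ?
		netdev_get_prio_tc_map(dev, skb->priority) : 0;
	txq = vdev->tc_to_txq[tc].offset +
	      reciprocal_scale(skb_get_hash(skb), vdev->tc_to_txq[tc].count);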
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 44 ++++++++++++++++++++++---
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c265963..3ff34ca 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5275,6 +5275,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
struct net_device *vdev = accel->netdev;
int i, baseq, err;
@@ -5286,6 +5288,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+ i, rss_i, baseq + (rss_i * i));
+
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = vdev;
@@ -5310,6 +5317,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
@@ -8206,18 +8217,22 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
- struct ixgbe_adapter *adapter;
- int txq;
#ifdef IXGBE_FCOE
+ struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
#endif
+ int txq;
if (fwd_adapter) {
- adapter = netdev_priv(dev);
- txq = reciprocal_scale(skb_get_hash(skb),
- adapter->num_rx_queues_per_pool);
+ u8 tc = netdev_get_num_tc(dev) ?
+ netdev_get_prio_tc_map(dev, skb->priority) : 0;
+ struct net_device *vdev = fwd_adapter->netdev;
+
+ txq = vdev->tc_to_txq[tc].offset;
+ txq += reciprocal_scale(skb_get_hash(skb),
+ vdev->tc_to_txq[tc].count);
- return txq + fwd_adapter->tx_base_queue;
+ return txq;
}
#ifdef IXGBE_FCOE
@@ -8771,6 +8786,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
/* if we cannot find a free pool then disable the offload */
netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
macvlan_release_l2fw_offload(vdev);
+
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
kfree(accel);
return 0;
@@ -9779,6 +9799,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!macvlan_supports_dest_filter(vdev))
return ERR_PTR(-EMEDIUMTYPE);
+ /* We need to lock down the macvlan to be a single queue device so that
+ * we can reuse the tc_to_txq field in the macvlan netdev to represent
+ * the queue mapping to our netdev.
+ */
+ if (netif_is_multiqueue(vdev))
+ return ERR_PTR(-ERANGE);
+
pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
if (pool == adapter->num_rx_pools) {
u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9835,6 +9862,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
return ERR_PTR(-ENOMEM);
set_bit(pool, adapter->fwd_bitmask);
+ netdev_set_sb_channel(vdev, pool);
accel->pool = pool;
accel->netdev = vdev;
@@ -9876,6 +9904,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
ring->netdev = NULL;
}
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(pdev, accel->netdev);
+ netdev_set_sb_channel(accel->netdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
}
* [jkirsher/next-queue PATCH v2 4/7] net: Add support for subordinate traffic classes to netdev_pick_tx
2018-07-09 16:19 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
` (2 preceding siblings ...)
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 3/7] ixgbe: Add code to populate and use macvlan tc to Tx queue map Alexander Duyck
@ 2018-07-09 16:19 ` Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 5/7] net: Add generic ndo_select_queue functions Alexander Duyck
` (2 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:19 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
This change adds support for the concept of subordinate device traffic
classes to the core networking code. In doing this we can start pulling
out the driver-specific bits needed to support selecting a queue based on
an upper device.
The solution as it currently stands is only partially implemented. I have
the start of some XPS bits in here, but I would still need to allow for
configuration of the XPS maps on the queues reserved for the subordinate
devices. For now the reference to the sb_dev XPS map is used only as a way
to skip the lookup of the lower device's XPS map, as that would result in
the wrong queue being picked.
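Condensed from the netdev_pick_tx()/skb_tx_hash() changes below: when a
subordinate device is in play, the hash-based fallback picks a queue within
that device's tc_to_txq block instead of across the whole lower device:
	sb_dev = sb_dev ? : dev;
	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = sb_dev->tc_to_txq[tc].offset;
		qcount = sb_dev->tc_to_txq[tc].count;
	}
	queue_index = reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;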
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19 +++-----
drivers/net/macvlan.c | 10 +---
include/linux/netdevice.h | 4 +-
net/core/dev.c | 58 +++++++++++++++----------
4 files changed, 45 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3ff34ca..41ef58f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8213,20 +8213,17 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
input, common, ring->queue_index);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
- struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
-#endif
int txq;
- if (fwd_adapter) {
- u8 tc = netdev_get_num_tc(dev) ?
- netdev_get_prio_tc_map(dev, skb->priority) : 0;
- struct net_device *vdev = fwd_adapter->netdev;
+ if (accel_priv) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ struct net_device *vdev = accel_priv;
txq = vdev->tc_to_txq[tc].offset;
txq += reciprocal_scale(skb_get_hash(skb),
@@ -8235,8 +8232,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
-#ifdef IXGBE_FCOE
-
/*
* only execute the code below if protocol is FCoE
* or FIP and we have FCoE enabled on the adapter
@@ -8262,11 +8257,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
txq -= f->indices;
return txq + f->offset;
-#else
- return fallback(dev, skb);
-#endif
}
+#endif
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf)
{
@@ -10068,7 +10061,6 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
- .ndo_select_queue = ixgbe_select_queue,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -10091,6 +10083,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
+ .ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index adde8fc..401e1d1 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
- void *accel_priv = NULL;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
@@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
return NET_XMIT_SUCCESS;
}
}
-
- /* For packets that are non-multicast and not bridged we will pass
- * the necessary information so that the lowerdev can distinguish
- * the source of the packets via the accel_priv value.
- */
- accel_priv = vlan->accel_priv;
xmit_world:
skb->dev = vlan->lowerdev;
- return dev_queue_xmit_accel(skb, accel_priv);
+ return dev_queue_xmit_accel(skb,
+ netdev_get_sb_channel(dev) ? dev : NULL);
}
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4648a9a..5729bc80 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2103,7 +2103,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv);
+ struct net_device *sb_dev);
/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
@@ -2568,7 +2568,7 @@ struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
diff --git a/net/core/dev.c b/net/core/dev.c
index cc1d6bb..09a7cc2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2786,24 +2786,26 @@ void netif_device_attach(struct net_device *dev)
* Returns a Tx hash based on the given packet descriptor a Tx queues' number
* to be used as a distribution range.
*/
-static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
+static u16 skb_tx_hash(const struct net_device *dev,
+ const struct net_device *sb_dev,
+ struct sk_buff *skb)
{
u32 hash;
u16 qoffset = 0;
u16 qcount = dev->real_num_tx_queues;
+ if (dev->num_tc) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
+ qoffset = sb_dev->tc_to_txq[tc].offset;
+ qcount = sb_dev->tc_to_txq[tc].count;
+ }
+
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
while (unlikely(hash >= qcount))
hash -= qcount;
- return hash;
- }
-
- if (dev->num_tc) {
- u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-
- qoffset = dev->tc_to_txq[tc].offset;
- qcount = dev->tc_to_txq[tc].count;
+ return hash + qoffset;
}
return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
@@ -3573,7 +3575,8 @@ static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
}
#endif
-static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_XPS
struct xps_dev_maps *dev_maps;
@@ -3587,7 +3590,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
if (!static_key_false(&xps_rxqs_needed))
goto get_cpus_map;
- dev_maps = rcu_dereference(dev->xps_rxqs_map);
+ dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
if (dev_maps) {
int tci = sk_rx_queue_get(sk);
@@ -3598,7 +3601,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
get_cpus_map:
if (queue_index < 0) {
- dev_maps = rcu_dereference(dev->xps_cpus_map);
+ dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
if (dev_maps) {
unsigned int tci = skb->sender_cpu - 1;
@@ -3614,17 +3617,20 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
#endif
}
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
+ sb_dev = sb_dev ? : dev;
+
if (queue_index < 0 || skb->ooo_okay ||
queue_index >= dev->real_num_tx_queues) {
- int new_index = get_xps_queue(dev, skb);
+ int new_index = get_xps_queue(dev, sb_dev, skb);
if (new_index < 0)
- new_index = skb_tx_hash(dev, skb);
+ new_index = skb_tx_hash(dev, sb_dev, skb);
if (queue_index != new_index && sk &&
sk_fullsock(sk) &&
@@ -3637,9 +3643,15 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
return queue_index;
}
+static u16 __netdev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return ___netdev_pick_tx(dev, skb, NULL);
+}
+
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv)
+ struct net_device *sb_dev)
{
int queue_index = 0;
@@ -3654,10 +3666,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+ queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
__netdev_pick_tx);
else
- queue_index = __netdev_pick_tx(dev, skb);
+ queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
@@ -3669,7 +3681,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
/**
* __dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
- * @accel_priv: private data used for L2 forwarding offload
+ * @sb_dev: subordinate device used for L2 forwarding offload
*
* Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling
@@ -3692,7 +3704,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -3731,7 +3743,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
else
skb_dst_force(skb);
- txq = netdev_pick_tx(dev, skb, accel_priv);
+ txq = netdev_pick_tx(dev, skb, sb_dev);
q = rcu_dereference_bh(txq->qdisc);
trace_net_dev_queue(skb);
@@ -3805,9 +3817,9 @@ int dev_queue_xmit(struct sk_buff *skb)
}
EXPORT_SYMBOL(dev_queue_xmit);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
{
- return __dev_queue_xmit(skb, accel_priv);
+ return __dev_queue_xmit(skb, sb_dev);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
* [jkirsher/next-queue PATCH v2 5/7] net: Add generic ndo_select_queue functions
2018-07-09 16:19 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
` (3 preceding siblings ...)
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 4/7] net: Add support for subordinate traffic classes to netdev_pick_tx Alexander Duyck
@ 2018-07-09 16:19 ` Alexander Duyck
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 6/7] net: allow ndo_select_queue to pass netdev Alexander Duyck
2018-07-09 16:20 ` [jkirsher/next-queue PATCH v2 7/7] net: allow fallback function " Alexander Duyck
6 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:19 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
This patch adds generic versions of the ndo_select_queue functions that
either return 0 or select a queue based on the processor ID. This is
mainly meant to reduce the number of functions we have to change in the
future whenever we have to deal with ndo_select_queue changes.
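A minimal sketch of how a driver ends up using them (foo_netdev_ops and
foo_start_xmit are hypothetical):
static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_select_queue	= dev_pick_tx_cpu_id,	/* or dev_pick_tx_zero */
};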
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
drivers/net/ethernet/lantiq_etop.c | 10 +---------
drivers/net/ethernet/ti/netcp_core.c | 9 +--------
drivers/staging/netlogic/xlr_net.c | 9 +--------
include/linux/netdevice.h | 4 ++++
net/core/dev.c | 14 ++++++++++++++
net/packet/af_packet.c | 2 +-
6 files changed, 22 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index afc8100..7a637b5 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -563,14 +563,6 @@ struct ltq_etop_priv {
spin_unlock_irqrestore(&priv->lock, flags);
}
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- /* we are currently only using the first queue */
- return 0;
-}
-
static int
ltq_etop_init(struct net_device *dev)
{
@@ -641,7 +633,7 @@ struct ltq_etop_priv {
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
- .ndo_select_queue = ltq_etop_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_init = ltq_etop_init,
.ndo_tx_timeout = ltq_etop_tx_timeout,
};
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 6ebf110..a1d335a 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
return err;
}
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return 0;
-}
-
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1972,7 +1965,7 @@ static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
.ndo_tx_timeout = netcp_ndo_tx_timeout,
- .ndo_select_queue = netcp_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_setup_tc = netcp_setup_tc,
};
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index e461168..4e6611e 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return (u16)smp_processor_id();
-}
-
static void xlr_hw_set_mac_addr(struct net_device *ndev)
{
struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -403,7 +396,7 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
.ndo_open = xlr_net_open,
.ndo_stop = xlr_net_stop,
.ndo_start_xmit = xlr_net_start_xmit,
- .ndo_select_queue = xlr_net_select_queue,
+ .ndo_select_queue = dev_pick_tx_cpu_id,
.ndo_set_mac_address = xlr_net_set_mac_addr,
.ndo_set_rx_mode = xlr_set_rx_mode,
.ndo_get_stats64 = xlr_stats,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5729bc80..2e056bc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2567,6 +2567,10 @@ struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
diff --git a/net/core/dev.c b/net/core/dev.c
index 09a7cc2..b5e5380 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3617,6 +3617,20 @@ static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
#endif
}
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ return 0;
+}
+EXPORT_SYMBOL(dev_pick_tx_zero);
+
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
+}
+EXPORT_SYMBOL(dev_pick_tx_cpu_id);
+
static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 47931eb..f37d087 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -277,7 +277,7 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
- return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
+ return dev_pick_tx_cpu_id(dev, skb, NULL, NULL);
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
* [jkirsher/next-queue PATCH v2 6/7] net: allow ndo_select_queue to pass netdev
2018-07-09 16:19 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
` (4 preceding siblings ...)
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 5/7] net: Add generic ndo_select_queue functions Alexander Duyck
@ 2018-07-09 16:19 ` Alexander Duyck
2018-07-09 16:20 ` [jkirsher/next-queue PATCH v2 7/7] net: allow fallback function " Alexander Duyck
6 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:19 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
This patch makes it so that instead of passing a void pointer as
accel_priv we instead pass a net_device pointer as sb_dev. Making this
change allows us to eventually pass the subordinate device through to the
fallback function so that we can keep the actual code in the
ndo_select_queue call focused as much as possible on the exception cases.
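For most drivers the change is purely mechanical; a sketch of the
prototype before and after (foo_select_queue is hypothetical):
/* before */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback);
/* after */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback);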
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
drivers/infiniband/hw/hfi1/vnic_main.c | 2 +-
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c | 4 ++--
drivers/net/bonding/bond_main.c | 3 ++-
drivers/net/ethernet/amazon/ena/ena_netdev.c | 3 ++-
drivers/net/ethernet/broadcom/bcmsysport.c | 2 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 ++-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3 ++-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 ++-
drivers/net/ethernet/hisilicon/hns/hns_enet.c | 3 ++-
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 ++++---
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 3 ++-
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3 ++-
drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 ++-
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 3 ++-
drivers/net/ethernet/renesas/ravb_main.c | 3 ++-
drivers/net/ethernet/sun/ldmvsw.c | 3 ++-
drivers/net/ethernet/sun/sunvnet.c | 3 ++-
drivers/net/hyperv/netvsc_drv.c | 4 ++--
drivers/net/net_failover.c | 5 +++--
drivers/net/team/team.c | 3 ++-
drivers/net/tun.c | 3 ++-
drivers/net/wireless/marvell/mwifiex/main.c | 3 ++-
drivers/net/xen-netback/interface.c | 2 +-
drivers/net/xen-netfront.c | 3 ++-
drivers/staging/rtl8188eu/os_dep/os_intfs.c | 3 ++-
drivers/staging/rtl8723bs/os_dep/os_intfs.c | 7 +++----
include/linux/netdevice.h | 11 +++++++----
net/core/dev.c | 6 ++++--
net/mac80211/iface.c | 4 ++--
29 files changed, 66 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 5d65582..616fc9b 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -423,7 +423,7 @@ static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 0c8aec6..6155878 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
@@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
skb_pull(skb, sizeof(*mdata));
return rc;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 63e3844..9a2ea3c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4094,7 +4094,8 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f2af87d..e3befb1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
u16 qid;
/* we suspect that this is good for in--kernel network services that
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d5fca2e..32f548e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2107,7 +2107,7 @@ static int bcm_sysport_stop(struct net_device *dev)
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index af7b5a4..e4e1cf9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a8ce5c5..0e508e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d91716..5dc5e56 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -930,7 +930,8 @@ static int setup_sge_queues(struct adapter *adap)
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
int txq;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ef9ef70..ff7a74e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 41ef58f..a0cf33d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8215,15 +8215,16 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
int txq;
- if (accel_priv) {
+ if (sb_dev) {
u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
- struct net_device *vdev = accel_priv;
+ struct net_device *vdev = sb_dev;
txq = vdev->tc_to_txq[tc].offset;
txq += reciprocal_scale(skb_get_hash(skb),
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0227786..df29966 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -688,7 +688,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ace6545..c3228b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -699,7 +699,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e2b7586..e1b237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -865,7 +865,8 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f0739da..dfcc371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -111,7 +111,8 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 68f1221..4a7f54c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1656,7 +1656,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a5dd627..d42f47f 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a94f504..12539b3 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd1d6e1..98c0107 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct net_device_context *ndc = netdev_priv(ndev);
@@ -343,7 +343,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
if (vf_ops->ndo_select_queue)
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
else
txq = fallback(vf_netdev, skb);
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 4f390fa..78b5496 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
}
static u16 net_failover_select_queue(struct net_device *dev,
- struct sk_buff *skb, void *accel_priv,
+ struct sk_buff *skb,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct net_failover_info *nfo_info = netdev_priv(dev);
@@ -128,7 +129,7 @@ static u16 net_failover_select_queue(struct net_device *dev,
if (ops->ndo_select_queue)
txq = ops->ndo_select_queue(primary_dev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
else
txq = fallback(primary_dev, skb);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b070959..3a95eaa 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1707,7 +1707,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a192a01..76f0f41 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -607,7 +607,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct tun_struct *tun = netdev_priv(dev);
u16 ret;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 510f6b8..fa3e8dd 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1279,7 +1279,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 78ebe49..19c4c58 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,7 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct xenvif *vif = netdev_priv(dev);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a57daec..d67cd37 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -545,7 +545,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
unsigned int num_queues = dev->real_num_tx_queues;
u32 hash;
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index add1ba0..38e85c8 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -253,7 +253,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index ace68f0..1816423 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
- , void *accel_priv
- , select_queue_fallback_t fallback
-)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2e056bc..0d3df30 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -957,7 +957,8 @@ struct dev_ifalias {
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * void *accel_priv, select_queue_fallback_t fallback);
+ * struct net_device *sb_dev,
+ * select_queue_fallback_t fallback);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -1229,7 +1230,7 @@ struct net_device_ops {
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
@@ -2568,9 +2569,11 @@ struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
diff --git a/net/core/dev.c b/net/core/dev.c
index b5e5380..a051ce2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3618,14 +3618,16 @@ static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
}
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
return 0;
}
EXPORT_SYMBOL(dev_pick_tx_zero);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 555e389..5e6cf2c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1130,7 +1130,7 @@ static void ieee80211_uninit(struct net_device *dev)
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
@@ -1176,7 +1176,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
* [jkirsher/next-queue PATCH v2 7/7] net: allow fallback function to pass netdev
2018-07-09 16:19 [jkirsher/next-queue PATCH v2 0/7] Add support for L2 Fwd Offload w/o ndo_select_queue Alexander Duyck
` (5 preceding siblings ...)
2018-07-09 16:19 ` [jkirsher/next-queue PATCH v2 6/7] net: allow ndo_select_queue to pass netdev Alexander Duyck
@ 2018-07-09 16:20 ` Alexander Duyck
6 siblings, 0 replies; 9+ messages in thread
From: Alexander Duyck @ 2018-07-09 16:20 UTC (permalink / raw)
To: netdev, intel-wired-lan, jeffrey.t.kirsher
For most of these calls we can just pass NULL through to the fallback
function as the sb_dev. The only cases where we cannot are those where we
might be dealing with either an upper device or a driver that would have
configured things to support an sb_dev itself.
The only driver with any significant change in this patchset should be
ixgbe as we can drop the redundant functionality that existed in both the
ndo_select_queue function and the fallback function that was passed through
to us.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
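As a minimal sketch of the resulting calling convention (not taken from the
series itself; the "foo" driver below is hypothetical): a driver that has no
notion of a subordinate device simply forwards NULL as the new sb_dev argument
when it defers to the fallback, while an sb_dev-aware driver such as ixgbe
passes the subordinate device through instead.

/*
 * Hypothetical driver illustrating the updated fallback signature.
 * The typedef mirrors the include/linux/netdevice.h change in the diff
 * below; foo_select_queue is an assumption for illustration only.
 */
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct net_device *sb_dev);

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
{
        /* No subordinate device handling here, so pass NULL through. */
        return fallback(dev, skb, NULL);
}

Drivers that do manage a subordinate device (for example the ixgbe macvlan
offload path) hand sb_dev to the fallback instead of NULL, which allows the
core queue-selection code added earlier in the series to account for the
subordinate device's queue configuration.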
drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +-
drivers/net/ethernet/broadcom/bcmsysport.c | 4 ++--
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 ++-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +-
drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++--
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4 ++--
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +-
drivers/net/hyperv/netvsc_drv.c | 2 +-
drivers/net/net_failover.c | 2 +-
drivers/net/xen-netback/interface.c | 2 +-
include/linux/netdevice.h | 3 ++-
net/core/dev.c | 12 +++---------
net/packet/af_packet.c | 7 ++++---
14 files changed, 24 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e3befb1..c673ac2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2224,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb);
+ qid = fallback(dev, skb, NULL);
return qid;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 32f548e..eb890c4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
return tx_ring->index;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e4e1cf9..5a727d4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1933,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5dc5e56..40cf8dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -973,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ff7a74e..948b3e0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2033,7 +2033,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb);
+ return fallback(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a0cf33d..bdaecae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8242,11 +8242,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
/* fall through */
default:
- return fallback(dev, skb);
+ return fallback(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index df29966..1857ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -695,9 +695,9 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
- return fallback(dev, skb) % rings_p_up;
+ return fallback(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index dfcc371..9106ea4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -115,7 +115,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
select_queue_fallback_t fallback)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb);
+ int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 98c0107..cf4f40a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -345,7 +345,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
sb_dev, fallback);
else
- txq = fallback(vf_netdev, skb);
+ txq = fallback(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 78b5496..d00d42c 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -131,7 +131,7 @@ static u16 net_failover_select_queue(struct net_device *dev,
txq = ops->ndo_select_queue(primary_dev, skb,
sb_dev, fallback);
else
- txq = fallback(primary_dev, skb);
+ txq = fallback(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 19c4c58..92274c2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -155,7 +155,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int size = vif->hash.size;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0d3df30..6bb01c7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -793,7 +793,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
enum tc_setup_type {
TC_SETUP_QDISC_MQPRIO,
diff --git a/net/core/dev.c b/net/core/dev.c
index a051ce2..e18d818 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3633,8 +3633,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
@@ -3659,12 +3659,6 @@ static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
return queue_index;
}
-static u16 __netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb)
-{
- return ___netdev_pick_tx(dev, skb, NULL);
-}
-
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev)
@@ -3685,7 +3679,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
__netdev_pick_tx);
else
- queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
+ queue_index = __netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f37d087..00189a3 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
return po->xmit == packet_direct_xmit;
}
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
- return dev_pick_tx_cpu_id(dev, skb, NULL, NULL);
+ return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
__packet_pick_tx_queue);
queue_index = netdev_cap_txqueue(dev, queue_index);
} else {
- queue_index = __packet_pick_tx_queue(dev, skb);
+ queue_index = __packet_pick_tx_queue(dev, skb, NULL);
}
return queue_index;