From: Shannon Nelson <shannon.nelson@oracle.com>
To: netdev@vger.kernel.org
Cc: silviu.smarandache@oracle.com
Subject: [PATCH RFC net-next] net: enable RPS on vlan devices
Date: Mon, 24 Sep 2018 11:01:00 -0700
Message-ID: <1537812060-22388-1-git-send-email-shannon.nelson@oracle.com>

This patch modifies the RPS processing code so that it first looks
for a vlan interface matching the packet's vlan tag and, if one is
found, uses the RPS settings of that vlan interface.  If no matching
vlan interface is found, or the vlan interface does not have RPS
enabled, it falls back to the RPS settings of the underlying device.
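
As a sketch of the intended use (interface names here are
hypothetical): with a vlan device eth0.100 on top of eth0, the vlan
device's rx-0 RPS map takes effect for its tagged traffic, while the
underlying device's map covers everything else:

    # steer eth0.100's Rx processing to cpus 2-3 (mask 0xc)
    echo c > /sys/class/net/eth0.100/queues/rx-0/rps_cpus

    # untagged traffic, and tagged traffic when the vlan map is
    # empty, still follows the underlying device's map (cpus 0-1)
    echo 3 > /sys/class/net/eth0/queues/rx-0/rps_cpus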

In supporting VMs where we can't control the guest OS, we'd like to
keep the host's network Rx processing on a set of cpus separate from
the VM's cpus, as one way to help mitigate the impact of the L1TF
issue.  When the VM's traffic is running on a vlan, we can steer the
Rx processing onto a set of cpus away from the VM's cpus.  Yes,
choosing to use this may cost a bit of throughput when the packets
are passed into the VM and have to move from one cpu cache to
another.
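
One rough way to confirm the resulting separation (a sketch:
/proc/net/softnet_stat has one line per online cpu, and the first
field is the hex count of packets processed on that cpu):

    awk '{ print "cpu" NR-1 ": 0x" $1 }' /proc/net/softnet_stat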

Orabug: 28645929

Signed-off-by: Silviu Smarandache <silviu.smarandache@oracle.com>
Signed-off-by: Shannon Nelson <shannon.nelson@oracle.com>
---
 net/core/dev.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 56 insertions(+), 3 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 559a912..28b12d1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3735,8 +3735,8 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  * CPU from the RPS map of the receiving queue for a given skb.
  * rcu_read_lock must be held on entry.
  */
-static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
-		       struct rps_dev_flow **rflowp)
+static int __get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+			 struct rps_dev_flow **rflowp)
 {
 	const struct rps_sock_flow_table *sock_flow_table;
 	struct netdev_rx_queue *rxqueue = dev->_rx;
@@ -3830,6 +3830,35 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	return cpu;
 }
 
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+		       struct rps_dev_flow **rflowp)
+{
+	/* Check for a vlan device with RPS settings */
+	if (skb_vlan_tag_present(skb)) {
+		struct net_device *vdev;
+		u16 vid;
+
+		vid = skb_vlan_tag_get_id(skb);
+		vdev = __vlan_find_dev_deep_rcu(dev, skb->vlan_proto, vid);
+		if (vdev) {
+			/* recorded queue is not referring to the vlan device.
+			 * Save and restore it
+			 */
+			int cpu;
+			u16 queue_mapping = skb_get_queue_mapping(skb);
+
+			skb_set_queue_mapping(skb, 0);
+			cpu = __get_rps_cpu(vdev, skb, rflowp);
+			skb_set_queue_mapping(skb, queue_mapping);
+			if (cpu != -1)
+				return cpu;
+		}
+	}
+
+	/* Fall back to RPS settings of original device */
+	return __get_rps_cpu(dev, skb, rflowp);
+}
+
 #ifdef CONFIG_RFS_ACCEL
 
 /**
@@ -4201,12 +4230,23 @@ static int netif_rx_internal(struct sk_buff *skb)
 		preempt_disable();
 		rcu_read_lock();
 
+		/* strip any vlan tag before calling get_rps_cpu() */
+		if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+		    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+			skb = skb_vlan_untag(skb);
+			if (unlikely(!skb)) {
+				ret = NET_RX_DROP;
+				goto unlock;
+			}
+		}
+
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
 		if (cpu < 0)
 			cpu = smp_processor_id();
 
 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
+unlock:
 		rcu_read_unlock();
 		preempt_enable();
 	} else
@@ -4755,8 +4795,19 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu;
+
+		/* strip any vlan tag before calling get_rps_cpu() */
+		if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+		    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+			skb = skb_vlan_untag(skb);
+			if (unlikely(!skb)) {
+				ret = NET_RX_DROP;
+				goto out;
+			}
+		}
 
+		cpu = get_rps_cpu(skb->dev, skb, &rflow);
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
@@ -4765,6 +4816,8 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 	}
 #endif
 	ret = __netif_receive_skb(skb);
+
+out:
 	rcu_read_unlock();
 	return ret;
 }
-- 
2.7.4
