From: Alexis Bauvin <abauvin@scaleway.com>
To: dsa@cumulusnetworks.com, roopa@cumulusnetworks.com
Cc: netdev@vger.kernel.org, abauvin@scaleway.com, akherbouche@scaleway.com
Subject: [RFC v2 3/3] vxlan: handle underlay VRF changes
Date: Mon, 19 Nov 2018 18:19:29 +0100
Message-ID: <20181119171929.69743-4-abauvin@scaleway.com>
In-Reply-To: <20181119171929.69743-1-abauvin@scaleway.com>

When the underlay VRF changes, either because the lower device itself
changed or because its VRF membership changed, this patch releases the
current socket of the VXLAN device and creates a new one in the right
VRF. This allows on-the-fly changes of the underlay VRF of a VXLAN
device.

Signed-off-by: Alexis Bauvin <abauvin@scaleway.com>
Reviewed-by: Amine Kherbouche <akherbouche@scaleway.com>
Tested-by: Amine Kherbouche <akherbouche@scaleway.com>
---
 drivers/net/vxlan.c | 94 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+)
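
As an illustration (not part of the patch), one possible way to exercise
the on-the-fly change with iproute2; the device names, VNI, table number
and remote address below are hypothetical:

  # create a VRF and a VXLAN device whose lower device (eth0 here)
  # initially lives outside any VRF
  ip link add vrf-blue type vrf table 10
  ip link set vrf-blue up
  ip link add vxlan0 type vxlan id 42 dev eth0 dstport 4789 \
      remote 203.0.113.1
  ip link set vxlan0 up

  # moving the lower device into the VRF raises NETDEV_CHANGEUPPER;
  # with this patch, the VXLAN underlay socket should be released and
  # reopened bound to vrf-blue
  ip link set eth0 master vrf-blue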

diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a3de08122269..13ed9569ec79 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -208,6 +208,18 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
 	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
 }
 
+static int vxlan_is_in_l3mdev_chain(struct net_device *chain,
+				    struct net_device *dev)
+{
+	if (!chain)
+		return 0;
+
+	if (chain->ifindex == dev->ifindex)
+		return 1;
+	return vxlan_is_in_l3mdev_chain(netdev_master_upper_dev_get(chain),
+					dev);
+}
+
 /* Find VXLAN socket based on network namespace, address family and UDP port
  * and enabled unshareable flags.
  */
@@ -3720,6 +3732,33 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
 }
 EXPORT_SYMBOL_GPL(vxlan_dev_create);
 
+static int vxlan_reopen(struct vxlan_net *vn, struct vxlan_dev *vxlan)
+{
+	int ret = 0;
+
+	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+	    !vxlan_group_used(vn, vxlan))
+		ret = vxlan_igmp_leave(vxlan);
+	vxlan_sock_release(vxlan);
+
+	if (ret < 0)
+		return ret;
+
+	ret = vxlan_sock_add(vxlan);
+	if (ret < 0)
+		return ret;
+
+	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
+		ret = vxlan_igmp_join(vxlan);
+		if (ret == -EADDRINUSE)
+			ret = 0;
+		if (ret)
+			vxlan_sock_release(vxlan);
+	}
+
+	return ret;
+}
+
 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
 					     struct net_device *dev)
 {
@@ -3742,6 +3781,55 @@ static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
 	unregister_netdevice_many(&list_kill);
 }
 
+static void vxlan_handle_change_upper(struct vxlan_net *vn,
+				      struct net_device *dev)
+{
+	struct vxlan_dev *vxlan, *next;
+
+	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+		struct net_device *lower;
+		int err;
+
+		lower = __dev_get_by_index(vxlan->net,
+					   vxlan->cfg.remote_ifindex);
+		if (!vxlan_is_in_l3mdev_chain(lower, dev))
+			continue;
+
+		err = vxlan_reopen(vn, vxlan);
+		if (err < 0)
+			netdev_err(vxlan->dev, "Failed to reopen socket: %d\n",
+				   err);
+	}
+}
+
+static void vxlan_handle_change(struct vxlan_net *vn, struct net_device *dev)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_sock *sock;
+	int l3mdev_index;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
+	bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
+#else
+	bool ipv6 = false;
+#endif
+
+	l3mdev_index =
+		l3mdev_master_upper_ifindex_by_index(vxlan->net,
+						     vxlan->cfg.remote_ifindex);
+
+	sock = ipv6 ? rcu_dereference(vxlan->vn6_sock)
+		    : rcu_dereference(vxlan->vn4_sock);
+	if (sock->sock->sk->sk_bound_dev_if != l3mdev_index) {
+		int ret = vxlan_reopen(vn, vxlan);
+
+		if (ret < 0)
+			netdev_err(vxlan->dev, "Failed to reopen socket: %d\n",
+				   ret);
+	}
+}
+
 static int vxlan_netdevice_event(struct notifier_block *unused,
 				 unsigned long event, void *ptr)
 {
@@ -3756,6 +3844,12 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
 	} else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
 		   event == NETDEV_UDP_TUNNEL_DROP_INFO) {
 		vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
+	} else if (event == NETDEV_CHANGEUPPER) {
+		vxlan_handle_change_upper(vn, dev);
+	} else if (event == NETDEV_CHANGE) {
+		if (dev->rtnl_link_ops &&
+		    !strcmp(dev->rtnl_link_ops->kind, vxlan_link_ops.kind))
+			vxlan_handle_change(vn, dev);
 	}
 
 	return NOTIFY_DONE;
-- 


Thread overview: 4+ messages
2018-11-19 17:19 [RFC v2 0/3] Add VRF support for VXLAN underlay Alexis Bauvin
2018-11-19 17:19 ` [RFC v2 1/3] udp_tunnel: add config option to bind to a device Alexis Bauvin
2018-11-19 17:19 ` [RFC v2 2/3] vxlan: add support for underlay in non-default VRF Alexis Bauvin
2018-11-19 17:19 ` [RFC v2 3/3] vxlan: handle underlay VRF changes Alexis Bauvin [this message]
