From: Ido Schimmel <idosch@nvidia.com>
To: <netdev@vger.kernel.org>
Cc: <davem@davemloft.net>, <kuba@kernel.org>, <pabeni@redhat.com>,
	<edumazet@google.com>, <andrew+netdev@lunn.ch>,
	<horms@kernel.org>, <petrm@nvidia.com>, <razor@blackwall.org>,
	Ido Schimmel <idosch@nvidia.com>
Subject: [PATCH net-next 08/15] vxlan: Use linked list to traverse FDB entries
Date: Tue, 15 Apr 2025 15:11:36 +0300
Message-ID: <20250415121143.345227-9-idosch@nvidia.com>
In-Reply-To: <20250415121143.345227-1-idosch@nvidia.com>

In preparation for removing the fixed-size hash table, convert FDB entry
traversal to use the newly added FDB linked list.

No functional changes intended.
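
At its core, the conversion replaces a nested walk over all
FDB_HASH_SIZE hash buckets with a single flat walk of the per-device
FDB list (vxlan->fdb_list, linked through f->fdb_node). The following
is a minimal user-space sketch of that before/after shape, not kernel
code: plain next pointers stand in for the hlist API, and only the
names fdb_head, fdb_list and FDB_HASH_SIZE are taken from the patch;
everything else is illustrative.

/* User-space sketch only -- NOT kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define FDB_HASH_SIZE 4	/* tiny bucket count for the example */

struct fdb_entry {
	unsigned int vni;
	struct fdb_entry *hash_next;	/* per-bucket chain (old walk) */
	struct fdb_entry *list_next;	/* all-entries list (new walk) */
};

static struct fdb_entry *fdb_head[FDB_HASH_SIZE];	/* old: per-bucket heads */
static struct fdb_entry *fdb_list;			/* new: one list for everything */

static void fdb_add(unsigned int vni)
{
	struct fdb_entry *f = calloc(1, sizeof(*f));
	unsigned int h = vni % FDB_HASH_SIZE;

	if (!f)
		exit(1);
	f->vni = vni;
	/* Each entry is linked into both structures. */
	f->hash_next = fdb_head[h];
	fdb_head[h] = f;
	f->list_next = fdb_list;
	fdb_list = f;
}

int main(void)
{
	struct fdb_entry *f;
	unsigned int h;

	fdb_add(10);
	fdb_add(20);
	fdb_add(30);

	/* Old pattern: nested loop over every hash bucket. */
	for (h = 0; h < FDB_HASH_SIZE; ++h)
		for (f = fdb_head[h]; f; f = f->hash_next)
			printf("bucket walk: vni %u\n", f->vni);

	/* New pattern: one flat walk over the single list. */
	for (f = fdb_list; f; f = f->list_next)
		printf("list walk:   vni %u\n", f->vni);

	return 0;
}

Both loops visit the same set of entries; the second form is what each
traversal site in the diff below is switched to.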

Reviewed-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
---
 drivers/net/vxlan/vxlan_core.c | 172 ++++++++++++++-------------------
 1 file changed, 75 insertions(+), 97 deletions(-)

diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 511c24e29d45..f9840a4b6e44 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -517,7 +517,6 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
 	struct vxlan_dev *vxlan;
 	struct vxlan_rdst *rdst;
 	struct vxlan_fdb *f;
-	unsigned int h;
 	int rc = 0;
 
 	if (!netif_is_vxlan(dev))
@@ -525,16 +524,13 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
 	vxlan = netdev_priv(dev);
 
 	spin_lock_bh(&vxlan->hash_lock);
-	for (h = 0; h < FDB_HASH_SIZE; ++h) {
-		hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
-			if (f->vni == vni) {
-				list_for_each_entry(rdst, &f->remotes, list) {
-					rc = vxlan_fdb_notify_one(nb, vxlan,
-								  f, rdst,
-								  extack);
-					if (rc)
-						goto unlock;
-				}
+	hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+		if (f->vni == vni) {
+			list_for_each_entry(rdst, &f->remotes, list) {
+				rc = vxlan_fdb_notify_one(nb, vxlan, f, rdst,
+							  extack);
+				if (rc)
+					goto unlock;
 			}
 		}
 	}
@@ -552,18 +548,17 @@ void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
 	struct vxlan_dev *vxlan;
 	struct vxlan_rdst *rdst;
 	struct vxlan_fdb *f;
-	unsigned int h;
 
 	if (!netif_is_vxlan(dev))
 		return;
 	vxlan = netdev_priv(dev);
 
 	spin_lock_bh(&vxlan->hash_lock);
-	for (h = 0; h < FDB_HASH_SIZE; ++h) {
-		hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
-			if (f->vni == vni)
-				list_for_each_entry(rdst, &f->remotes, list)
-					rdst->offloaded = false;
+	hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+		if (f->vni == vni) {
+			list_for_each_entry(rdst, &f->remotes, list)
+				rdst->offloaded = false;
+		}
 	}
 	spin_unlock_bh(&vxlan->hash_lock);
 
@@ -1351,52 +1346,46 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
 {
 	struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	unsigned int h;
+	struct vxlan_fdb *f;
 	int err = 0;
 
-	for (h = 0; h < FDB_HASH_SIZE; ++h) {
-		struct vxlan_fdb *f;
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
-			struct vxlan_rdst *rd;
-
-			if (rcu_access_pointer(f->nh)) {
-				if (*idx < ctx->fdb_idx)
-					goto skip_nh;
-				err = vxlan_fdb_info(skb, vxlan, f,
-						     NETLINK_CB(cb->skb).portid,
-						     cb->nlh->nlmsg_seq,
-						     RTM_NEWNEIGH,
-						     NLM_F_MULTI, NULL);
-				if (err < 0) {
-					rcu_read_unlock();
-					goto out;
-				}
-skip_nh:
-				*idx += 1;
-				continue;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+		struct vxlan_rdst *rd;
+
+		if (rcu_access_pointer(f->nh)) {
+			if (*idx < ctx->fdb_idx)
+				goto skip_nh;
+			err = vxlan_fdb_info(skb, vxlan, f,
+					     NETLINK_CB(cb->skb).portid,
+					     cb->nlh->nlmsg_seq,
+					     RTM_NEWNEIGH, NLM_F_MULTI, NULL);
+			if (err < 0) {
+				rcu_read_unlock();
+				goto out;
 			}
+skip_nh:
+			*idx += 1;
+			continue;
+		}
 
-			list_for_each_entry_rcu(rd, &f->remotes, list) {
-				if (*idx < ctx->fdb_idx)
-					goto skip;
-
-				err = vxlan_fdb_info(skb, vxlan, f,
-						     NETLINK_CB(cb->skb).portid,
-						     cb->nlh->nlmsg_seq,
-						     RTM_NEWNEIGH,
-						     NLM_F_MULTI, rd);
-				if (err < 0) {
-					rcu_read_unlock();
-					goto out;
-				}
-skip:
-				*idx += 1;
+		list_for_each_entry_rcu(rd, &f->remotes, list) {
+			if (*idx < ctx->fdb_idx)
+				goto skip;
+
+			err = vxlan_fdb_info(skb, vxlan, f,
+					     NETLINK_CB(cb->skb).portid,
+					     cb->nlh->nlmsg_seq,
+					     RTM_NEWNEIGH, NLM_F_MULTI, rd);
+			if (err < 0) {
+				rcu_read_unlock();
+				goto out;
 			}
+skip:
+			*idx += 1;
 		}
-		rcu_read_unlock();
 	}
+	rcu_read_unlock();
 out:
 	return err;
 }
@@ -2830,35 +2819,30 @@ static void vxlan_cleanup(struct timer_list *t)
 {
 	struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
 	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
-	unsigned int h;
+	struct hlist_node *n;
+	struct vxlan_fdb *f;
 
 	if (!netif_running(vxlan->dev))
 		return;
 
 	spin_lock(&vxlan->hash_lock);
-	for (h = 0; h < FDB_HASH_SIZE; ++h) {
-		struct hlist_node *p, *n;
-
-		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
-			struct vxlan_fdb *f
-				= container_of(p, struct vxlan_fdb, hlist);
-			unsigned long timeout;
+	hlist_for_each_entry_safe(f, n, &vxlan->fdb_list, fdb_node) {
+		unsigned long timeout;
 
-			if (f->state & (NUD_PERMANENT | NUD_NOARP))
-				continue;
+		if (f->state & (NUD_PERMANENT | NUD_NOARP))
+			continue;
 
-			if (f->flags & NTF_EXT_LEARNED)
-				continue;
+		if (f->flags & NTF_EXT_LEARNED)
+			continue;
 
-			timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
-			if (time_before_eq(timeout, jiffies)) {
-				netdev_dbg(vxlan->dev,
-					   "garbage collect %pM\n",
-					   f->eth_addr);
-				f->state = NUD_STALE;
-				vxlan_fdb_destroy(vxlan, f, true, true);
-			} else if (time_before(timeout, next_timer))
-				next_timer = timeout;
+		timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
+		if (time_before_eq(timeout, jiffies)) {
+			netdev_dbg(vxlan->dev, "garbage collect %pM\n",
+				   f->eth_addr);
+			f->state = NUD_STALE;
+			vxlan_fdb_destroy(vxlan, f, true, true);
+		} else if (time_before(timeout, next_timer)) {
+			next_timer = timeout;
 		}
 	}
 	spin_unlock(&vxlan->hash_lock);
@@ -3050,31 +3034,25 @@ static void vxlan_flush(struct vxlan_dev *vxlan,
 			const struct vxlan_fdb_flush_desc *desc)
 {
 	bool match_remotes = vxlan_fdb_flush_should_match_remotes(desc);
-	unsigned int h;
+	struct hlist_node *n;
+	struct vxlan_fdb *f;
 
 	spin_lock_bh(&vxlan->hash_lock);
-	for (h = 0; h < FDB_HASH_SIZE; ++h) {
-		struct hlist_node *p, *n;
-
-		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
-			struct vxlan_fdb *f
-				= container_of(p, struct vxlan_fdb, hlist);
-
-			if (!vxlan_fdb_flush_matches(f, vxlan, desc))
-				continue;
-
-			if (match_remotes) {
-				bool destroy_fdb = false;
+	hlist_for_each_entry_safe(f, n, &vxlan->fdb_list, fdb_node) {
+		if (!vxlan_fdb_flush_matches(f, vxlan, desc))
+			continue;
 
-				vxlan_fdb_flush_match_remotes(f, vxlan, desc,
-							      &destroy_fdb);
+		if (match_remotes) {
+			bool destroy_fdb = false;
 
-				if (!destroy_fdb)
-					continue;
-			}
+			vxlan_fdb_flush_match_remotes(f, vxlan, desc,
+						      &destroy_fdb);
 
-			vxlan_fdb_destroy(vxlan, f, true, true);
+			if (!destroy_fdb)
+				continue;
 		}
+
+		vxlan_fdb_destroy(vxlan, f, true, true);
 	}
 	spin_unlock_bh(&vxlan->hash_lock);
 }
@@ -4860,7 +4838,7 @@ static void vxlan_fdb_nh_flush(struct nexthop *nh)
 		vxlan = rcu_dereference(fdb->vdev);
 		WARN_ON(!vxlan);
 		spin_lock_bh(&vxlan->hash_lock);
-		if (!hlist_unhashed(&fdb->hlist))
+		if (!hlist_unhashed(&fdb->fdb_node))
 			vxlan_fdb_destroy(vxlan, fdb, false, false);
 		spin_unlock_bh(&vxlan->hash_lock);
 	}
-- 
2.49.0



Thread overview: 22+ messages
2025-04-15 12:11 [PATCH net-next 00/15] vxlan: Convert FDB table to rhashtable Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 01/15] vxlan: Add RCU read-side critical sections in the Tx path Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 02/15] vxlan: Simplify creation of default FDB entry Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 03/15] vxlan: Insert FDB into hash table in vxlan_fdb_create() Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 04/15] vxlan: Unsplit default FDB entry creation and notification Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 05/15] vxlan: Relocate assignment of default remote device Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 06/15] vxlan: Use a single lock to protect the FDB table Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 07/15] vxlan: Add a linked list of FDB entries Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 08/15] vxlan: Use linked list to traverse FDB entries Ido Schimmel [this message]
2025-04-15 12:11 ` [PATCH net-next 09/15] vxlan: Convert FDB garbage collection to RCU Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 10/15] vxlan: Convert FDB flushing to RCU Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 11/15] vxlan: Rename FDB Tx lookup function Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 12/15] vxlan: Create wrappers for FDB lookup Ido Schimmel
2025-04-22  8:46   ` Paolo Abeni
2025-04-23 12:21     ` Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 13/15] vxlan: Do not treat dst cache initialization errors as fatal Ido Schimmel
2025-04-22  8:49   ` Paolo Abeni
2025-04-24  8:18     ` Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 14/15] vxlan: Introduce FDB key structure Ido Schimmel
2025-04-15 12:11 ` [PATCH net-next 15/15] vxlan: Convert FDB table to rhashtable Ido Schimmel
2025-04-15 14:15 ` [PATCH net-next 00/15] vxlan: Convert FDB table to rhashtable Nikolay Aleksandrov
2025-04-22  9:38 ` patchwork-bot+netdevbpf
