From: Florian Westphal <fw@strlen.de>
To: <netdev@vger.kernel.org>
Cc: Paolo Abeni <pabeni@redhat.com>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>,
<netfilter-devel@vger.kernel.org>,
pablo@netfilter.org
Subject: [PATCH net-next 6/9] ipvs: no_cport and dropentry counters can be per-net
Date: Tue, 24 Feb 2026 21:50:45 +0100
Message-ID: <20260224205048.4718-7-fw@strlen.de>
In-Reply-To: <20260224205048.4718-1-fw@strlen.de>
From: Julian Anastasov <ja@ssi.bg>
Change the no_cport counters to be per-net and per address family.
This should reduce the extra conn lookups: the cport==0 fallback
lookup is now done only when the current netns has NO_CPORT
connections for the packet's address family.
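For illustration, a minimal user-space model of the gated fallback
lookup (a sketch only: all types and helper names below are
stand-ins, not the kernel's):

  #include <stdatomic.h>
  #include <stdio.h>

  enum { AF4, AF6, AF_MAX };  /* stand-ins for the per-AF index values */

  struct netns { atomic_int no_cport_conns[AF_MAX]; };
  struct param { int af; unsigned short cport; };

  /* stand-in for the real connection hash lookup */
  static int hash_lookup(const struct param *p)
  {
      return p->cport == 0;
  }

  static int conn_in_get(struct netns *ns, const struct param *p)
  {
      int hit = hash_lookup(p);

      /* do the cport==0 retry only when it can possibly match */
      if (!hit && atomic_load(&ns->no_cport_conns[p->af])) {
          struct param zero = *p;

          zero.cport = 0;
          hit = hash_lookup(&zero);
      }
      return hit;
  }

  int main(void)
  {
      struct netns ns = { .no_cport_conns = { 0, 0 } };
      struct param p = { .af = AF4, .cport = 12345 };

      printf("%d\n", conn_in_get(&ns, &p));  /* 0: gate closed, retry skipped */
      atomic_fetch_add(&ns.no_cport_conns[AF4], 1);
      printf("%d\n", conn_in_get(&ns, &p));  /* 1: retry with cport 0 matches */
      return 0;
  }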
By changing the dropentry counters from global to per-net, one net
can no longer affect the drop rate of another.
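The pacing itself is unchanged: a conn with i incoming packets
(1..8) is dropped once per i eligible candidates in its bucket.
A small stand-alone model of that scheme (the counter name mirrors
the patch; the rest is illustrative):

  #include <stdio.h>

  /* zero-initialized, like ipvs->dropentry_counters */
  static signed char dropentry_counters[8];

  static int todrop(int in_pkts)
  {
      int i;

      /* only conns with 1..8 incoming packets are drop candidates */
      if (in_pkts > 8 || in_pkts < 1)
          return 0;
      i = in_pkts - 1;
      if (--dropentry_counters[i] > 0)
          return 0;  /* not this bucket's turn yet */
      dropentry_counters[i] = i + 1;  /* re-arm: one drop per in_pkts candidates */
      return 1;
  }

  int main(void)
  {
      int k, drops = 0;

      for (k = 0; k < 8; k++)
          drops += todrop(4);
      printf("dropped %d of 8\n", drops);  /* prints: dropped 2 of 8 */
      return 0;
  }

The re-arm value equals the packet count, so conns with fewer
incoming packets are dropped more often, matching the old global
todrop_rate[] table.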
Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Florian Westphal <fw@strlen.de>
---
include/net/ip_vs.h | 2 ++
net/netfilter/ipvs/ip_vs_conn.c | 64 ++++++++++++++++++++-------------
2 files changed, 41 insertions(+), 25 deletions(-)
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index f2291be36409..ad8a16146ac5 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -948,6 +948,7 @@ struct netns_ipvs {
#endif
/* ip_vs_conn */
atomic_t conn_count; /* connection counter */
+ atomic_t no_cport_conns[IP_VS_AF_MAX];
/* ip_vs_ctl */
struct ip_vs_stats_rcu *tot_stats; /* Statistics & est. */
@@ -973,6 +974,7 @@ struct netns_ipvs {
int drop_counter;
int old_secure_tcp;
atomic_t dropentry;
+ s8 dropentry_counters[8];
/* locks in ctl.c */
spinlock_t dropentry_lock; /* drop entry handling */
spinlock_t droppacket_lock; /* drop packet handling */
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 50cc492c7553..66057db63d02 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -59,9 +59,6 @@ static struct hlist_head *ip_vs_conn_tab __read_mostly;
/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
-/* counter for no client port connections */
-static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
-
/* random value for IPVS connection hash */
static unsigned int ip_vs_conn_rnd __read_mostly;
@@ -294,10 +291,16 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
struct ip_vs_conn *cp;
cp = __ip_vs_conn_in_get(p);
- if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
- struct ip_vs_conn_param cport_zero_p = *p;
- cport_zero_p.cport = 0;
- cp = __ip_vs_conn_in_get(&cport_zero_p);
+ if (!cp) {
+ struct netns_ipvs *ipvs = p->ipvs;
+ int af_id = ip_vs_af_index(p->af);
+
+ if (atomic_read(&ipvs->no_cport_conns[af_id])) {
+ struct ip_vs_conn_param cport_zero_p = *p;
+
+ cport_zero_p.cport = 0;
+ cp = __ip_vs_conn_in_get(&cport_zero_p);
+ }
}
IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
@@ -490,9 +493,12 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
{
if (ip_vs_conn_unhash(cp)) {
+ struct netns_ipvs *ipvs = cp->ipvs;
+ int af_id = ip_vs_af_index(cp->af);
+
spin_lock_bh(&cp->lock);
if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
- atomic_dec(&ip_vs_conn_no_cport_cnt);
+ atomic_dec(&ipvs->no_cport_conns[af_id]);
cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
cp->cport = cport;
}
@@ -891,8 +897,11 @@ static void ip_vs_conn_expire(struct timer_list *t)
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
- if (cp->flags & IP_VS_CONN_F_NO_CPORT)
- atomic_dec(&ip_vs_conn_no_cport_cnt);
+ if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
+ int af_id = ip_vs_af_index(cp->af);
+
+ atomic_dec(&ipvs->no_cport_conns[af_id]);
+ }
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
ip_vs_conn_rcu_free(&cp->rcu_head);
else
@@ -999,8 +1008,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
cp->out_seq.delta = 0;
atomic_inc(&ipvs->conn_count);
- if (flags & IP_VS_CONN_F_NO_CPORT)
- atomic_inc(&ip_vs_conn_no_cport_cnt);
+ if (unlikely(flags & IP_VS_CONN_F_NO_CPORT)) {
+ int af_id = ip_vs_af_index(cp->af);
+
+ atomic_inc(&ipvs->no_cport_conns[af_id]);
+ }
/* Bind the connection with a destination server */
cp->dest = NULL;
@@ -1257,6 +1269,7 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
};
#endif
+#ifdef CONFIG_SYSCTL
/* Randomly drop connection entries before running out of memory
* Can be used for DATA and CTL conns. For TPL conns there are exceptions:
@@ -1266,12 +1279,7 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
*/
static inline int todrop_entry(struct ip_vs_conn *cp)
{
- /*
- * The drop rate array needs tuning for real environments.
- * Called from timer bh only => no locking
- */
- static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
- static signed char todrop_counter[9] = {0};
+ struct netns_ipvs *ipvs = cp->ipvs;
int i;
/* if the conn entry hasn't lasted for 60 seconds, don't drop it.
@@ -1280,15 +1288,17 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
return 0;
- /* Don't drop the entry if its number of incoming packets is not
- located in [0, 8] */
+	/* Drop only conns whose number of incoming packets is in the [1..8] range */
i = atomic_read(&cp->in_pkts);
- if (i > 8 || i < 0) return 0;
+ if (i > 8 || i < 1)
+ return 0;
- if (!todrop_rate[i]) return 0;
- if (--todrop_counter[i] > 0) return 0;
+ i--;
+ if (--ipvs->dropentry_counters[i] > 0)
+ return 0;
- todrop_counter[i] = todrop_rate[i];
+	/* Prefer to drop conns with fewer incoming packets */
+ ipvs->dropentry_counters[i] = i + 1;
return 1;
}
@@ -1368,7 +1378,7 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
}
rcu_read_unlock();
}
-
+#endif
/*
* Flush all the connection entries in the ip_vs_conn_tab
@@ -1450,7 +1460,11 @@ void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
*/
int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
{
+ int idx;
+
atomic_set(&ipvs->conn_count, 0);
+ for (idx = 0; idx < IP_VS_AF_MAX; idx++)
+ atomic_set(&ipvs->no_cport_conns[idx], 0);
#ifdef CONFIG_PROC_FS
if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
--
2.52.0