From: Pablo Neira Ayuso <pablo@netfilter.org>
To: Julian Anastasov <ja@ssi.bg>
Cc: Simon Horman <horms@verge.net.au>,
	lvs-devel@vger.kernel.org, netfilter-devel@vger.kernel.org,
	Dust Li <dust.li@linux.alibaba.com>,
	Jiejian Wu <jiejian@linux.alibaba.com>,
	rcu@vger.kernel.org
Subject: Re: [PATCHv6 net-next 13/14] ipvs: add ip_vs_status info
Date: Mon, 24 Nov 2025 22:42:06 +0100	[thread overview]
Message-ID: <aSTRLowH5pHe-IvC@calendula> (raw)
In-Reply-To: <20251019155711.67609-14-ja@ssi.bg>

On Sun, Oct 19, 2025 at 06:57:10PM +0300, Julian Anastasov wrote:
> Add /proc/net/ip_vs_status to show current state of IPVS.

The motivation for this new /proc interface is to give users the
information they need to decide when to shrink or grow the hash
tables, which becomes possible with the new sysctl knobs added in
14/14 of this series.
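
For reference, stitching together the seq_printf() calls below, the
output should look roughly like this (all numbers are made up for
illustration; the lfactor values tie in with the conn_lfactor and
svc_lfactor sysctls from 14/14):

  $ cat /proc/net/ip_vs_status
  Conns:	1234
  Conn buckets:	4096 (12 bits, lfactor 2)
  Conn buckets empty:	3000 (73%)
  Conn buckets len-1:	900 (82%)
  Conn buckets len-2:	150 (13%)
  ...
  Services:	10
  Service buckets:	256 (8 bits, lfactor 2)
  Service buckets empty:	246 (96%)
  Service buckets len-1:	10 (100%)
  Stats thread slots:	1 (max 8)
  Stats chain max len:	12
  Stats thread ests:	...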

> Signed-off-by: Julian Anastasov <ja@ssi.bg>
> ---
>  net/netfilter/ipvs/ip_vs_ctl.c | 145 +++++++++++++++++++++++++++++++++
>  1 file changed, 145 insertions(+)
> 
> diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
> index 3dfc01ef1890..a508e9bdde73 100644
> --- a/net/netfilter/ipvs/ip_vs_ctl.c
> +++ b/net/netfilter/ipvs/ip_vs_ctl.c
> @@ -2915,6 +2915,144 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
>  
>  	return 0;
>  }
> +
> +static int ip_vs_status_show(struct seq_file *seq, void *v)
> +{
> +	struct net *net = seq_file_single_net(seq);
> +	struct netns_ipvs *ipvs = net_ipvs(net);
> +	unsigned int resched_score = 0;
> +	struct ip_vs_conn_hnode *hn;
> +	struct hlist_bl_head *head;
> +	struct ip_vs_service *svc;
> +	struct ip_vs_rht *t, *pt;
> +	struct hlist_bl_node *e;
> +	int old_gen, new_gen;
> +	u32 counts[8];
> +	u32 bucket;
> +	int count;
> +	u32 sum1;
> +	u32 sum;
> +	int i;
> +
> +	rcu_read_lock();
> +
> +	t = rcu_dereference(ipvs->conn_tab);
> +
> +	seq_printf(seq, "Conns:\t%d\n", atomic_read(&ipvs->conn_count));
> +	seq_printf(seq, "Conn buckets:\t%d (%d bits, lfactor %d)\n",
> +		   t ? t->size : 0, t ? t->bits : 0, t ? t->lfactor : 0);
> +
> +	if (!atomic_read(&ipvs->conn_count))
> +		goto after_conns;
> +	old_gen = atomic_read(&ipvs->conn_tab_changes);
> +
> +repeat_conn:
> +	smp_rmb(); /* ipvs->conn_tab and conn_tab_changes */
> +	memset(counts, 0, sizeof(counts));
> +	ip_vs_rht_for_each_table_rcu(ipvs->conn_tab, t, pt) {
> +		for (bucket = 0; bucket < t->size; bucket++) {
> +			DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
> +
> +			count = 0;
> +			resched_score++;
> +			ip_vs_rht_walk_bucket_rcu(t, bucket, head) {
> +				count = 0;
> +				hlist_bl_for_each_entry_rcu(hn, e, head, node)
> +					count++;
> +			}
> +			resched_score += count;
> +			if (resched_score >= 100) {
> +				resched_score = 0;
> +				cond_resched_rcu();
> +				new_gen = atomic_read(&ipvs->conn_tab_changes);
> +				/* New table installed ? */
> +				if (old_gen != new_gen) {
> +					old_gen = new_gen;
> +					goto repeat_conn;
> +				}
> +			}
> +			counts[min(count, (int)ARRAY_SIZE(counts) - 1)]++;
> +		}
> +	}
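
For other readers of the thread: the repeat_conn logic above is the
usual generation-counter retry for a lockless scan, where partial
counts are thrown away whenever a resize installs a new table. A
minimal user-space sketch of the same idea (names and the bucket
count are illustrative, this is not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static atomic_int table_changes;	/* writer bumps this when replacing the table */

static void scan(void)
{
	unsigned int counts[8];
	int old_gen, new_gen, work = 0;

	old_gen = atomic_load(&table_changes);
repeat:
	/* the kernel code issues smp_rmb() here so the table pointer
	 * read pairs with the store that published the new table */
	memset(counts, 0, sizeof(counts));	/* drop partial results */
	for (int bucket = 0; bucket < 4096; bucket++) {
		/* ... count the entries in this bucket into counts[] ... */
		if (++work >= 100) {	/* recheck periodically, not per bucket */
			work = 0;
			/* the kernel also does cond_resched_rcu() here */
			new_gen = atomic_load(&table_changes);
			if (old_gen != new_gen) {	/* new table installed? */
				old_gen = new_gen;
				goto repeat;	/* restart the whole scan */
			}
		}
	}
	/* counts[] now describes a single table generation */
}

int main(void)
{
	scan();
	puts("done");
	return 0;
}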
> +	for (sum = 0, i = 0; i < ARRAY_SIZE(counts); i++)
> +		sum += counts[i];
> +	sum1 = sum - counts[0];
> +	seq_printf(seq, "Conn buckets empty:\t%u (%lu%%)\n",
> +		   counts[0], (unsigned long)counts[0] * 100 / max(sum, 1U));
> +	for (i = 1; i < ARRAY_SIZE(counts); i++) {
> +		if (!counts[i])
> +			continue;
> +		seq_printf(seq, "Conn buckets len-%d:\t%u (%lu%%)\n",
> +			   i, counts[i],
> +			   (unsigned long)counts[i] * 100 / max(sum1, 1U));
> +	}
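
One observation that may help people reading the output: counts[] has
eight slots and the chain length is clamped, so "len-7" really means
"7 or more"; and the per-length percentages are computed against the
non-empty buckets (sum1), while the "empty" percentage is against all
buckets (sum).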
> +
> +after_conns:
> +	t = rcu_dereference(ipvs->svc_table);
> +
> +	count = ip_vs_get_num_services(ipvs);
> +	seq_printf(seq, "Services:\t%d\n", count);
> +	seq_printf(seq, "Service buckets:\t%d (%d bits, lfactor %d)\n",
> +		   t ? t->size : 0, t ? t->bits : 0, t ? t->lfactor : 0);
> +
> +	if (!count)
> +		goto after_svc;
> +	old_gen = atomic_read(&ipvs->svc_table_changes);
> +
> +repeat_svc:
> +	smp_rmb(); /* ipvs->svc_table and svc_table_changes */
> +	memset(counts, 0, sizeof(counts));
> +	ip_vs_rht_for_each_table_rcu(ipvs->svc_table, t, pt) {
> +		for (bucket = 0; bucket < t->size; bucket++) {
> +			DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
> +
> +			count = 0;
> +			resched_score++;
> +			ip_vs_rht_walk_bucket_rcu(t, bucket, head) {
> +				count = 0;
> +				hlist_bl_for_each_entry_rcu(svc, e, head,
> +							    s_list)
> +					count++;
> +			}
> +			resched_score += count;
> +			if (resched_score >= 100) {
> +				resched_score = 0;
> +				cond_resched_rcu();
> +				new_gen = atomic_read(&ipvs->svc_table_changes);
> +				/* New table installed ? */
> +				if (old_gen != new_gen) {
> +					old_gen = new_gen;
> +					goto repeat_svc;
> +				}
> +			}
> +			counts[min(count, (int)ARRAY_SIZE(counts) - 1)]++;
> +		}
> +	}
> +	for (sum = 0, i = 0; i < ARRAY_SIZE(counts); i++)
> +		sum += counts[i];
> +	sum1 = sum - counts[0];
> +	seq_printf(seq, "Service buckets empty:\t%u (%lu%%)\n",
> +		   counts[0], (unsigned long)counts[0] * 100 / max(sum, 1U));
> +	for (i = 1; i < ARRAY_SIZE(counts); i++) {
> +		if (!counts[i])
> +			continue;
> +		seq_printf(seq, "Service buckets len-%d:\t%u (%lu%%)\n",
> +			   i, counts[i],
> +			   (unsigned long)counts[i] * 100 / max(sum1, 1U));
> +	}
> +
> +after_svc:
> +	seq_printf(seq, "Stats thread slots:\t%d (max %lu)\n",
> +		   ipvs->est_kt_count, ipvs->est_max_threads);
> +	seq_printf(seq, "Stats chain max len:\t%d\n", ipvs->est_chain_max);
> +	seq_printf(seq, "Stats thread ests:\t%d\n",
> +		   ipvs->est_chain_max * IPVS_EST_CHAIN_FACTOR *
> +		   IPVS_EST_NTICKS);
> +
> +	rcu_read_unlock();
> +	return 0;
> +}
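
And a quick consumer-side sketch, since users will likely end up
parsing these lines: a small program that echoes the file and flags a
low share of empty connection buckets. The path comes from this
patch; the 20% threshold is a number I made up, the real tuning
guidance belongs to 14/14:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/ip_vs_status", "r");

	if (!f) {
		perror("/proc/net/ip_vs_status");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int n;
		unsigned long pct;

		/* "Conn buckets empty:\t%u (%lu%%)" per the seq_printf above */
		if (sscanf(line, "Conn buckets empty: %u (%lu%%)", &n, &pct) == 2 &&
		    pct < 20)
			fprintf(stderr, "few empty buckets, consider growing conn_tab\n");
		fputs(line, stdout);
	}
	fclose(f);
	return 0;
}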
> +
>  #endif
>  
>  /*
> @@ -4835,6 +4973,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
>  				    ipvs->net->proc_net,
>  				    ip_vs_stats_percpu_show, NULL))
>  		goto err_percpu;
> +	if (!proc_create_net_single("ip_vs_status", 0, ipvs->net->proc_net,
> +				    ip_vs_status_show, NULL))
> +		goto err_status;
>  #endif
>  
>  	ret = ip_vs_control_net_init_sysctl(ipvs);
> @@ -4845,6 +4986,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
>  
>  err:
>  #ifdef CONFIG_PROC_FS
> +	remove_proc_entry("ip_vs_status", ipvs->net->proc_net);
> +
> +err_status:
>  	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
>  
>  err_percpu:
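
The unwind here follows the usual ladder: the new entry's
remove_proc_entry() is stacked above err_status, so a later failure
in ip_vs_control_net_init() (e.g. when ip_vs_control_net_init_sysctl()
fails) tears down ip_vs_status together with everything registered
before it.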
> @@ -4870,6 +5014,7 @@ void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
>  	ip_vs_control_net_cleanup_sysctl(ipvs);
>  	cancel_delayed_work_sync(&ipvs->est_reload_work);
>  #ifdef CONFIG_PROC_FS
> +	remove_proc_entry("ip_vs_status", ipvs->net->proc_net);
>  	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
>  	remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
>  	remove_proc_entry("ip_vs", ipvs->net->proc_net);
> -- 
> 2.51.0
> 
