From: Julian Anastasov <ja@ssi.bg>
To: Simon Horman <horms@verge.net.au>
Cc: lvs-devel@vger.kernel.org, netfilter-devel@vger.kernel.org,
Dust Li <dust.li@linux.alibaba.com>,
Jiejian Wu <jiejian@linux.alibaba.com>,
rcu@vger.kernel.org
Subject: [PATCHv4 net-next 13/14] ipvs: add ip_vs_status info
Date: Tue, 28 May 2024 11:02:33 +0300
Message-ID: <20240528080234.10148-14-ja@ssi.bg>
In-Reply-To: <20240528080234.10148-1-ja@ssi.bg>

Add /proc/net/ip_vs_status to show the current state of IPVS: the number
of connections and services, the size and bucket length distribution of
the connection and service hash tables, and the estimator (stats) thread
parameters.

Signed-off-by: Julian Anastasov <ja@ssi.bg>
---
net/netfilter/ipvs/ip_vs_ctl.c | 145 +++++++++++++++++++++++++++++++++
1 file changed, 145 insertions(+)
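
For illustration, with this patch applied the new file is expected to look
roughly like the sample below; all numbers here are made-up placeholders,
the real values depend on the traffic and on the sysctl settings:

$ cat /proc/net/ip_vs_status
Conns:	1234
Conn buckets:	4096 (12 bits, lfactor 4)
Conn buckets empty:	2346 (57%)
Conn buckets len-1:	1500 (85%)
Conn buckets len-2:	250 (14%)
Services:	3
Service buckets:	256 (8 bits, lfactor 4)
Service buckets empty:	253 (98%)
Service buckets len-1:	3 (100%)
Stats thread slots:	1 (max 16)
Stats chain max len:	18
Stats thread ests:	43200
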
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 187a5e238231..6b84b6f17a32 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2913,6 +2913,144 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+
+static int ip_vs_status_show(struct seq_file *seq, void *v)
+{
+	struct net *net = seq_file_single_net(seq);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	unsigned int resched_score = 0;
+	struct ip_vs_conn_hnode *hn;
+	struct hlist_bl_head *head;
+	struct ip_vs_service *svc;
+	struct ip_vs_rht *t, *pt;
+	struct hlist_bl_node *e;
+	int old_gen, new_gen;
+	u32 counts[8];
+	u32 bucket;
+	int count;
+	u32 sum1;
+	u32 sum;
+	int i;
+
+	rcu_read_lock();
+
+	t = rcu_dereference(ipvs->conn_tab);
+
+	seq_printf(seq, "Conns:\t%d\n", atomic_read(&ipvs->conn_count));
+	seq_printf(seq, "Conn buckets:\t%d (%d bits, lfactor %d)\n",
+		   t ? t->size : 0, t ? t->bits : 0, t ? t->lfactor : 0);
+
+	if (!atomic_read(&ipvs->conn_count))
+		goto after_conns;
+	old_gen = atomic_read(&ipvs->conn_tab_changes);
+
+repeat_conn:
+	smp_rmb(); /* ipvs->conn_tab and conn_tab_changes */
+	memset(counts, 0, sizeof(counts));
+	ip_vs_rht_for_each_table_rcu(ipvs->conn_tab, t, pt) {
+		for (bucket = 0; bucket < t->size; bucket++) {
+			DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
+
+			count = 0;
+			resched_score++;
+			ip_vs_rht_walk_bucket_rcu(t, bucket, head) {
+				count = 0;
+				hlist_bl_for_each_entry_rcu(hn, e, head, node)
+					count++;
+			}
+			resched_score += count;
+			if (resched_score >= 100) {
+				resched_score = 0;
+				cond_resched_rcu();
+				new_gen = atomic_read(&ipvs->conn_tab_changes);
+				/* New table installed ? */
+				if (old_gen != new_gen) {
+					old_gen = new_gen;
+					goto repeat_conn;
+				}
+			}
+			counts[min(count, (int)ARRAY_SIZE(counts) - 1)]++;
+		}
+	}
+	for (sum = 0, i = 0; i < ARRAY_SIZE(counts); i++)
+		sum += counts[i];
+	sum1 = sum - counts[0];
+	seq_printf(seq, "Conn buckets empty:\t%u (%lu%%)\n",
+		   counts[0], (unsigned long)counts[0] * 100 / max(sum, 1U));
+	for (i = 1; i < ARRAY_SIZE(counts); i++) {
+		if (!counts[i])
+			continue;
+		seq_printf(seq, "Conn buckets len-%d:\t%u (%lu%%)\n",
+			   i, counts[i],
+			   (unsigned long)counts[i] * 100 / max(sum1, 1U));
+	}
+
+after_conns:
+	t = rcu_dereference(ipvs->svc_table);
+
+	count = ip_vs_get_num_services(ipvs);
+	seq_printf(seq, "Services:\t%d\n", count);
+	seq_printf(seq, "Service buckets:\t%d (%d bits, lfactor %d)\n",
+		   t ? t->size : 0, t ? t->bits : 0, t ? t->lfactor : 0);
+
+	if (!count)
+		goto after_svc;
+	old_gen = atomic_read(&ipvs->svc_table_changes);
+
+repeat_svc:
+	smp_rmb(); /* ipvs->svc_table and svc_table_changes */
+	memset(counts, 0, sizeof(counts));
+	ip_vs_rht_for_each_table_rcu(ipvs->svc_table, t, pt) {
+		for (bucket = 0; bucket < t->size; bucket++) {
+			DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
+
+			count = 0;
+			resched_score++;
+			ip_vs_rht_walk_bucket_rcu(t, bucket, head) {
+				count = 0;
+				hlist_bl_for_each_entry_rcu(svc, e, head,
+							    s_list)
+					count++;
+			}
+			resched_score += count;
+			if (resched_score >= 100) {
+				resched_score = 0;
+				cond_resched_rcu();
+				new_gen = atomic_read(&ipvs->svc_table_changes);
+				/* New table installed ? */
+				if (old_gen != new_gen) {
+					old_gen = new_gen;
+					goto repeat_svc;
+				}
+			}
+			counts[min(count, (int)ARRAY_SIZE(counts) - 1)]++;
+		}
+	}
+	for (sum = 0, i = 0; i < ARRAY_SIZE(counts); i++)
+		sum += counts[i];
+	sum1 = sum - counts[0];
+	seq_printf(seq, "Service buckets empty:\t%u (%lu%%)\n",
+		   counts[0], (unsigned long)counts[0] * 100 / max(sum, 1U));
+	for (i = 1; i < ARRAY_SIZE(counts); i++) {
+		if (!counts[i])
+			continue;
+		seq_printf(seq, "Service buckets len-%d:\t%u (%lu%%)\n",
+			   i, counts[i],
+			   (unsigned long)counts[i] * 100 / max(sum1, 1U));
+	}
+
+after_svc:
+	seq_printf(seq, "Stats thread slots:\t%d (max %lu)\n",
+		   ipvs->est_kt_count, ipvs->est_max_threads);
+	seq_printf(seq, "Stats chain max len:\t%d\n", ipvs->est_chain_max);
+	seq_printf(seq, "Stats thread ests:\t%d\n",
+		   ipvs->est_chain_max * IPVS_EST_CHAIN_FACTOR *
+		   IPVS_EST_NTICKS);
+
+	rcu_read_unlock();
+	return 0;
+}
+
 #endif
 
 /*
@@ -4836,6 +4974,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 				    ipvs->net->proc_net,
 				    ip_vs_stats_percpu_show, NULL))
 		goto err_percpu;
+	if (!proc_create_net_single("ip_vs_status", 0, ipvs->net->proc_net,
+				    ip_vs_status_show, NULL))
+		goto err_status;
 #endif
 
 	ret = ip_vs_control_net_init_sysctl(ipvs);
@@ -4846,6 +4987,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 
 err:
 #ifdef CONFIG_PROC_FS
+	remove_proc_entry("ip_vs_status", ipvs->net->proc_net);
+
+err_status:
 	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
 
 err_percpu:
@@ -4871,6 +5015,7 @@ void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
 	ip_vs_control_net_cleanup_sysctl(ipvs);
 	cancel_delayed_work_sync(&ipvs->est_reload_work);
 #ifdef CONFIG_PROC_FS
+	remove_proc_entry("ip_vs_status", ipvs->net->proc_net);
 	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
 	remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
 	remove_proc_entry("ip_vs", ipvs->net->proc_net);
--
2.44.0