From: Waiman Long <longman@redhat.com>
To: Simon Horman <horms@verge.net.au>, Julian Anastasov <ja@ssi.bg>,
"David S. Miller" <davem@davemloft.net>,
David Ahern <dsahern@kernel.org>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Pablo Neira Ayuso <pablo@netfilter.org>,
Florian Westphal <fw@strlen.de>, Phil Sutter <phil@nwl.cc>,
Frederic Weisbecker <frederic@kernel.org>,
Chen Ridong <chenridong@huawei.com>, Phil Auld <pauld@redhat.com>
Cc: linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
lvs-devel@vger.kernel.org, netfilter-devel@vger.kernel.org,
coreteam@netfilter.org, sheviks <sheviks@gmail.com>,
Waiman Long <longman@redhat.com>
Subject: [PATCH 2/2] ipvs: Guard access of HK_TYPE_KTHREAD cpumask with RCU
Date: Tue, 24 Mar 2026 11:18:27 -0400 [thread overview]
Message-ID: <20260324151827.2006656-3-longman@redhat.com> (raw)
In-Reply-To: <20260324151827.2006656-1-longman@redhat.com>
The ip_vs_ctl.c file and the associated ip_vs.h file are the only places
in the kernel where the HK_TYPE_KTHREAD cpumask is retrieved and used.
Now that the HK_TYPE_KTHREAD/HK_TYPE_DOMAIN cpumask can be changed at run
time, we need to use RCU to guard access to this cpumask to avoid a
potential use-after-free (UAF) problem, as the returned cpumask may be
freed while it is still being used.
Signed-off-by: Waiman Long <longman@redhat.com>
---
include/net/ip_vs.h | 20 ++++++++++++++++----
net/netfilter/ipvs/ip_vs_ctl.c | 13 ++++++++-----
2 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 29a36709e7f3..17c85a575ef4 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1155,7 +1155,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
return ipvs->sysctl_run_estimation;
}
-static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
+static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
{
if (ipvs->est_cpulist_valid)
return ipvs->sysctl_est_cpulist;
@@ -1273,7 +1273,7 @@ static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
return 1;
}
-static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
+static inline const struct cpumask *__sysctl_est_cpulist(struct netns_ipvs *ipvs)
{
return housekeeping_cpumask(HK_TYPE_KTHREAD);
}
@@ -1290,6 +1290,18 @@ static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
#endif
+static inline bool sysctl_est_cpulist_empty(struct netns_ipvs *ipvs)
+{
+ guard(rcu)();
+ return cpumask_empty(__sysctl_est_cpulist(ipvs));
+}
+
+static inline unsigned int sysctl_est_cpulist_weight(struct netns_ipvs *ipvs)
+{
+ guard(rcu)();
+ return cpumask_weight(__sysctl_est_cpulist(ipvs));
+}
+
/* IPVS core functions
* (from ip_vs_core.c)
*/
@@ -1604,7 +1616,7 @@ static inline void ip_vs_est_stopped_recalc(struct netns_ipvs *ipvs)
/* Stop tasks while cpulist is empty or if disabled with flag */
ipvs->est_stopped = !sysctl_run_estimation(ipvs) ||
(ipvs->est_cpulist_valid &&
- cpumask_empty(sysctl_est_cpulist(ipvs)));
+ sysctl_est_cpulist_empty(ipvs));
#endif
}
@@ -1620,7 +1632,7 @@ static inline bool ip_vs_est_stopped(struct netns_ipvs *ipvs)
static inline int ip_vs_est_max_threads(struct netns_ipvs *ipvs)
{
unsigned int limit = IPVS_EST_CPU_KTHREADS *
- cpumask_weight(sysctl_est_cpulist(ipvs));
+ sysctl_est_cpulist_weight(ipvs);
return max(1U, limit);
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 35642de2a0fe..f38a2e2a9dc5 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1973,11 +1973,14 @@ static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
mutex_lock(&ipvs->est_mutex);
- if (ipvs->est_cpulist_valid)
- mask = *valp;
- else
- mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
- ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
+ /* HK_TYPE_KTHREAD cpumask needs RCU protection */
+ scoped_guard(rcu) {
+ if (ipvs->est_cpulist_valid)
+ mask = *valp;
+ else
+ mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
+ ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
+ }
mutex_unlock(&ipvs->est_mutex);
--
2.53.0
next prev parent reply other threads:[~2026-03-24 15:19 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-24 15:18 [PATCH 0/2] ipvs: Fix incorrect use of HK_TYPE_KTHREAD housekeeping cpumask Waiman Long
2026-03-24 15:18 ` [PATCH 1/2] sched/isolation: Make HK_TYPE_KTHREAD an alias of HK_TYPE_DOMAIN Waiman Long
2026-03-24 18:59 ` David Dull
2026-03-24 15:18 ` Waiman Long [this message]
2026-03-26 8:32 ` [PATCH 2/2] ipvs: Guard access of HK_TYPE_KTHREAD cpumask with RCU Julian Anastasov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260324151827.2006656-3-longman@redhat.com \
--to=longman@redhat.com \
--cc=chenridong@huawei.com \
--cc=coreteam@netfilter.org \
--cc=davem@davemloft.net \
--cc=dsahern@kernel.org \
--cc=edumazet@google.com \
--cc=frederic@kernel.org \
--cc=fw@strlen.de \
--cc=horms@verge.net.au \
--cc=ja@ssi.bg \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lvs-devel@vger.kernel.org \
--cc=netdev@vger.kernel.org \
--cc=netfilter-devel@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=pablo@netfilter.org \
--cc=pauld@redhat.com \
--cc=phil@nwl.cc \
--cc=sheviks@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox