From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
To: Mat Martineau <martineau@kernel.org>,
Geliang Tang <geliang@kernel.org>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>,
Paolo Abeni <pabeni@redhat.com>, Simon Horman <horms@kernel.org>,
Shuah Khan <shuah@kernel.org>
Cc: netdev@vger.kernel.org, mptcp@lists.linux.dev,
linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org,
"Matthieu Baerts (NGI0)" <matttbe@kernel.org>
Subject: [PATCH net-next 1/4] mptcp: pm: in-kernel: record fullmesh endp nb
Date: Sat, 01 Nov 2025 18:56:51 +0100
Message-ID: <20251101-net-next-mptcp-fm-endp-nb-bind-v1-1-b4166772d6bb@kernel.org>
In-Reply-To: <20251101-net-next-mptcp-fm-endp-nb-bind-v1-0-b4166772d6bb@kernel.org>

Instead of iterating over all endpoints under the RCU read lock just to
check if one of them has the fullmesh flag, we can keep a counter of
fullmesh endpoints, similar to what is done with the other flags. This
counter is now checked before iterating over all endpoints.

Similar to the other counters, this new one is also exposed in
mptcp_info. A userspace app can then see when fullmesh mode is in use,
with potentially (too) many subflows.

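For instance, a userspace application could read the new counter via
the MPTCP_INFO socket option on a connected MPTCP socket. This is only
a minimal sketch, not part of this patch: the helper name and the
SOL_MPTCP fallback definition are illustrative, and it assumes UAPI
headers from a kernel carrying this change.

  #include <stdio.h>
  #include <sys/socket.h>
  #include <linux/mptcp.h>

  #ifndef SOL_MPTCP
  #define SOL_MPTCP 284
  #endif

  /* Print the number of fullmesh endpoints known to the in-kernel PM. */
  static void print_endp_fullmesh_max(int fd)
  {
          struct mptcp_info info = { 0 };
          socklen_t len = sizeof(info);

          if (getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len) < 0) {
                  perror("getsockopt(MPTCP_INFO)");
                  return;
          }

          printf("fullmesh endpoints: %u\n",
                 info.mptcpi_endp_fullmesh_max);
  }

Such endpoints would typically have been added with something like
'ip mptcp endpoint add 10.0.0.2 fullmesh' (the address here is only an
example).
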
Reviewed-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
---
include/uapi/linux/mptcp.h | 3 ++-
net/mptcp/pm_kernel.c | 38 +++++++++++++++++++++++++++++++++++---
net/mptcp/protocol.h | 1 +
net/mptcp/sockopt.c | 2 ++
4 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 87cfab874e24..04eea6d1d0a9 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -70,7 +70,8 @@ struct mptcp_info {
__u64 mptcpi_bytes_acked;
__u8 mptcpi_subflows_total;
__u8 mptcpi_endp_laminar_max;
- __u8 reserved[2];
+ __u8 mptcpi_endp_fullmesh_max;
+ __u8 reserved;
__u32 mptcpi_last_data_sent;
__u32 mptcpi_last_data_recv;
__u32 mptcpi_last_ack_recv;
diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c
index 2ae95476dba3..e2918c68ff02 100644
--- a/net/mptcp/pm_kernel.c
+++ b/net/mptcp/pm_kernel.c
@@ -22,6 +22,7 @@ struct pm_nl_pernet {
u8 endp_signal_max;
u8 endp_subflow_max;
u8 endp_laminar_max;
+ u8 endp_fullmesh_max;
u8 limit_add_addr_accepted;
u8 limit_extra_subflows;
u8 next_id;
@@ -70,6 +71,14 @@ u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk)
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_laminar_max);
+u8 mptcp_pm_get_endp_fullmesh_max(const struct mptcp_sock *msk)
+{
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+
+ return READ_ONCE(pernet->endp_fullmesh_max);
+}
+EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_fullmesh_max);
+
u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
@@ -603,9 +612,12 @@ fill_local_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *remote,
int i;
/* If there is at least one MPTCP endpoint with a fullmesh flag */
- i = fill_local_addresses_vec_fullmesh(msk, remote, locals, c_flag_case);
- if (i)
- return i;
+ if (mptcp_pm_get_endp_fullmesh_max(msk)) {
+ i = fill_local_addresses_vec_fullmesh(msk, remote, locals,
+ c_flag_case);
+ if (i)
+ return i;
+ }
/* If there is at least one MPTCP endpoint with a laminar flag */
if (mptcp_pm_get_endp_laminar_max(msk))
@@ -790,6 +802,10 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
addr_max = pernet->endp_laminar_max;
WRITE_ONCE(pernet->endp_laminar_max, addr_max + 1);
}
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+ addr_max = pernet->endp_fullmesh_max;
+ WRITE_ONCE(pernet->endp_fullmesh_max, addr_max + 1);
+ }
pernet->endpoints++;
if (!entry->addr.port)
@@ -1187,6 +1203,10 @@ int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
addr_max = pernet->endp_laminar_max;
WRITE_ONCE(pernet->endp_laminar_max, addr_max - 1);
}
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+ addr_max = pernet->endp_fullmesh_max;
+ WRITE_ONCE(pernet->endp_fullmesh_max, addr_max - 1);
+ }
pernet->endpoints--;
list_del_rcu(&entry->list);
@@ -1502,6 +1522,18 @@ int mptcp_pm_nl_set_flags(struct mptcp_pm_addr_entry *local,
changed = (local->flags ^ entry->flags) & mask;
entry->flags = (entry->flags & ~mask) | (local->flags & mask);
*local = *entry;
+
+ if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+ u8 addr_max = pernet->endp_fullmesh_max;
+
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)
+ addr_max++;
+ else
+ addr_max--;
+
+ WRITE_ONCE(pernet->endp_fullmesh_max, addr_max);
+ }
+
spin_unlock_bh(&pernet->lock);
mptcp_pm_nl_set_flags_all(net, local, changed);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 379a88e14e8d..9a3429175758 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -1183,6 +1183,7 @@ void __mptcp_pm_kernel_worker(struct mptcp_sock *msk);
u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk);
u8 mptcp_pm_get_endp_subflow_max(const struct mptcp_sock *msk);
u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_fullmesh_max(const struct mptcp_sock *msk);
u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk);
u8 mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index a28a48385885..de90a2897d2d 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -982,6 +982,8 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
mptcp_pm_get_endp_subflow_max(msk);
info->mptcpi_endp_laminar_max =
mptcp_pm_get_endp_laminar_max(msk);
+ info->mptcpi_endp_fullmesh_max =
+ mptcp_pm_get_endp_fullmesh_max(msk);
}
if (__mptcp_check_fallback(msk))
--
2.51.0