From: Paolo Abeni <pabeni@redhat.com>
To: mptcp@lists.linux.dev
Cc: Mat Martineau <martineau@kernel.org>, Geliang Tang <geliang@kernel.org>
Subject: Re: [PATCH v6 mptcp-next 11/11] mptcp: leverage the backlog for RX packet processing
Date: Thu, 23 Oct 2025 17:11:18 +0200
Message-ID: <3feb8c2a-2098-4626-8bf2-edd66f679463@redhat.com>
In-Reply-To: <2201f259d2176bca0ad37500a352658f7ef5a1f0.1761142784.git.pabeni@redhat.com>
On 10/22/25 4:31 PM, Paolo Abeni wrote:
> When the msk socket is owned or the msk receive buffer is full,
> move the incoming skbs into an msk-level backlog list. This avoids
> traversing the joined subflows and acquiring the subflow-level
> socket lock at reception time, improving RX performance.
>
> When processing the backlog, use the fwd alloc memory borrowed from
> the incoming subflow. skbs exceeding the msk receive space are
> not dropped; instead they are kept in the backlog until the receive
> buffer is freed. Dropping packets already acked at the TCP level is
> explicitly discouraged by the RFC and would corrupt the data stream
> for fallback sockets.
>
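To make the memory flow explicit, this is the accounting in a nutshell
(condensed from the diff below, not new code): the forward allocation
uncharged from the subflow travels with the skb, is parked in the new
msk->borrowed_mem counter while the skb sits in the backlog, and is
handed to the msk when the backlog is spooled:

	/* RX path, msk lock not available: park the fwd alloc together
	 * with the skb instead of dropping either.
	 */
	msk->borrowed_mem += bmem;
	__mptcp_add_backlog(sk, skb);

	/* Spool time, msk lock held: the parked memory becomes msk
	 * forward allocation, so the backlogged skbs can be charged
	 * to the msk without touching the subflows.
	 */
	sk_forward_alloc_add(sk, msk->borrowed_mem);
	msk->borrowed_mem = 0;
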
> Move the conditional reschedule in release_cb() so that it takes
> effect only after the first loop iteration, avoiding a reschedule
> just before releasing the lock.
>
> Special care is needed to avoid adding skbs to the backlog of a closed
> msk and to avoid leaving dangling references in the backlog
> at subflow closing time.
>
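On the closed-msk point: the fix reduces to an early drop in
__mptcp_add_backlog(). A minimal sketch of the intended shape (the
actual body is in patch 10/11; the closed-state check shown here is
illustrative only, names follow this series):

	/* Sketch only: never queue to a dead msk, so no skb can
	 * outlive the socket inside the backlog list.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE)) {
		kfree_skb(skb);
		return;
	}
	list_add_tail(&skb->list, &msk->backlog_list);
	WRITE_ONCE(msk->backlog_len, msk->backlog_len + skb->truesize);
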
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> v5 -> v6:
> - update the backlog length as soon as possible, to advertise the
> correct window
> - explicitly bound the backlog processing loop to the maximum backlog
> length
>
> v4 -> v5:
> - consolidate ssk rcvbuf accounting in __mptcp_move_skb(), remove
> some code duplication
> - return early in __mptcp_add_backlog() when dropping skbs because
> the msk is closed. This avoids a later UaF
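On the first v5 -> v6 note above: the advertised window is derived from
the free receive space, and a spooled skb is momentarily accounted in
both the backlog and sk_rmem_alloc, so backlog_len must be decremented
as soon as possible or the window is underestimated. Conceptually, with
a hypothetical helper (not in-tree code):

	/* Hypothetical illustration: bytes parked in the backlog
	 * consume receive space just like queued ones, so both must
	 * be subtracted when computing the space to advertise.
	 */
	static inline int mptcp_free_rx_space(const struct sock *sk)
	{
		const struct mptcp_sock *msk = mptcp_sk(sk);

		return READ_ONCE(sk->sk_rcvbuf) -
		       atomic_read(&sk->sk_rmem_alloc) -
		       READ_ONCE(msk->backlog_len);
	}
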
> ---
> net/mptcp/protocol.c | 151 +++++++++++++++++++++++++++----------------
> net/mptcp/protocol.h | 2 +-
> 2 files changed, 96 insertions(+), 57 deletions(-)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index 5a1d8f9e0fb0ec..0aae17ab77edb2 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -696,7 +696,7 @@ static void __mptcp_add_backlog(struct sock *sk, struct sk_buff *skb)
> }
>
> static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
> - struct sock *ssk)
> + struct sock *ssk, bool own_msk)
> {
> struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
> struct sock *sk = (struct sock *)msk;
> @@ -712,9 +712,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
> struct sk_buff *skb;
> bool fin;
>
> - if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
> - break;
> -
> /* try to move as much data as available */
> map_remaining = subflow->map_data_len -
> mptcp_subflow_get_map_offset(subflow);
> @@ -742,9 +739,12 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
> int bmem;
>
> bmem = mptcp_init_skb(ssk, skb, offset, len);
> - sk_forward_alloc_add(sk, bmem);
> + if (own_msk)
> + sk_forward_alloc_add(sk, bmem);
> + else
> + msk->borrowed_mem += bmem;
>
> - if (true)
> + if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf)
> ret |= __mptcp_move_skb(sk, skb);
> else
> __mptcp_add_backlog(sk, skb);
> @@ -866,7 +866,7 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
> struct sock *sk = (struct sock *)msk;
> bool moved;
>
> - moved = __mptcp_move_skbs_from_subflow(msk, ssk);
> + moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
> __mptcp_ofo_queue(msk);
> if (unlikely(ssk->sk_err))
> __mptcp_subflow_error_report(sk, ssk);
> @@ -898,9 +898,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
> /* Wake-up the reader only for in-sequence data */
> if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
> sk->sk_data_ready(sk);
> -
> } else {
> - __set_bit(MPTCP_DEQUEUE, &mptcp_sk(sk)->cb_flags);
> + __mptcp_move_skbs_from_subflow(msk, ssk, false);
> }
> mptcp_data_unlock(sk);
> }
> @@ -2135,60 +2134,92 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
> msk->rcvq_space.time = mstamp;
> }
>
> -static struct mptcp_subflow_context *
> -__mptcp_first_ready_from(struct mptcp_sock *msk,
> - struct mptcp_subflow_context *subflow)
> +static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
> {
> - struct mptcp_subflow_context *start_subflow = subflow;
> + struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
> + struct mptcp_sock *msk = mptcp_sk(sk);
> + bool moved = false;
> +
> + *delta = 0;
> + while (1) {
> + /* If the msk recvbuf is full, stop: don't drop */
> + if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
> + break;
> +
> + prefetch(skb->next);
> + list_del(&skb->list);
> + *delta += skb->truesize;
> +
> + moved |= __mptcp_move_skb(sk, skb);
> + if (list_empty(skbs))
> + break;
>
> - while (!READ_ONCE(subflow->data_avail)) {
> - subflow = mptcp_next_subflow(msk, subflow);
> - if (subflow == start_subflow)
> - return NULL;
> + skb = list_first_entry(skbs, struct sk_buff, list);
> }
> - return subflow;
> +
> + __mptcp_ofo_queue(msk);
> + if (moved)
> + mptcp_check_data_fin((struct sock *)msk);
> + return moved;
> }
>
> -static bool __mptcp_move_skbs(struct sock *sk)
> +static bool mptcp_can_spool_backlog(struct sock *sk, u32 moved,
> + struct list_head *skbs)
> {
> - struct mptcp_subflow_context *subflow;
> struct mptcp_sock *msk = mptcp_sk(sk);
> - bool ret = false;
>
> - if (list_empty(&msk->conn_list))
> + if (list_empty(&msk->backlog_list))
> return false;
>
> - subflow = list_first_entry(&msk->conn_list,
> - struct mptcp_subflow_context, node);
> - for (;;) {
> - struct sock *ssk;
> - bool slowpath;
> + /* Borrowed mem could be zero only in the unlikely event that
> + * the backlog is full
> + */
> + if (likely(msk->borrowed_mem)) {
> + sk_forward_alloc_add(sk, msk->borrowed_mem);
> + msk->borrowed_mem = 0;
> + sk->sk_reserved_mem = msk->backlog_len;
With the above I intended to prevent the forward memory handling from
releasing backlog_len bytes. Re-reading the relevant code, it does not
allow that (and experiments confirmed it), see:
https://elixir.bootlin.com/linux/v6.18-rc2/source/include/net/sock.h#L1593
and:
https://elixir.bootlin.com/linux/v6.18-rc2/source/include/net/sock.h#L1580
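For quick reference, the two helpers behind those links read roughly as
follows (abridged from memory, double-check against the links above):

	static inline int sk_unused_reserved_mem(const struct sock *sk)
	{
		int unused_mem;

		if (likely(!sk->sk_reserved_mem))
			return 0;

		unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
			     atomic_read(&sk->sk_rmem_alloc);

		return unused_mem > 0 ? unused_mem : 0;
	}

	static inline void sk_mem_reclaim(struct sock *sk)
	{
		int reclaimable;

		if (!sk_has_account(sk))
			return;

		reclaimable = sk->sk_forward_alloc -
			      sk_unused_reserved_mem(sk);

		if (reclaimable >= (int)PAGE_SIZE)
			__sk_mem_reclaim(sk, reclaimable);
	}

The reservation is discounted by the bytes already charged to the
socket, so setting sk_reserved_mem to backlog_len does not pin an
equivalent amount of forward memory the way intended above.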
This will need some more care. Also patch 2 will require some
significant rework.
@Mat, @Matttbe: could you please consider merging patches 1,3-9?
I think they should be pretty uncontroversial, and it would make the
series more manageable for future iterations (and would alleviate my
frustration while trying to make this thing work correctly).
Thanks!
Paolo
> + }
>
> - /*
> - * As an optimization avoid traversing the subflows list
> - * and ev. acquiring the subflow socket lock before baling out
> - */
> - if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
> - break;
> + /* Limit the backlog loop to the maximum backlog size; moved skbs are
> + * accounted on both the backlog and the receive buffer; the caller
> + * should update the backlog usage ASAP, to avoid underestimating the
> + * rcvwnd.
> + */
> + if (moved > sk->sk_rcvbuf || sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
> + return false;
>
> - subflow = __mptcp_first_ready_from(msk, subflow);
> - if (!subflow)
> - break;
> + INIT_LIST_HEAD(skbs);
> + list_splice_init(&msk->backlog_list, skbs);
> + return true;
> +}
>
> - ssk = mptcp_subflow_tcp_sock(subflow);
> - slowpath = lock_sock_fast(ssk);
> - ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret;
> - if (unlikely(ssk->sk_err))
> - __mptcp_error_report(sk);
> - unlock_sock_fast(ssk, slowpath);
> +static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
> + struct list_head *skbs)
> +{
> + struct mptcp_sock *msk = mptcp_sk(sk);
>
> - subflow = mptcp_next_subflow(msk, subflow);
> - }
> + WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
> + list_splice(skbs, &msk->backlog_list);
> + sk->sk_reserved_mem = msk->backlog_len;
> +}
>
> - __mptcp_ofo_queue(msk);
> - if (ret)
> - mptcp_check_data_fin((struct sock *)msk);
> - return ret;
> +static bool mptcp_move_skbs(struct sock *sk)
> +{
> + u32 moved, total_moved = 0;
> + struct list_head skbs;
> + bool enqueued = false;
> +
> + mptcp_data_lock(sk);
> + while (mptcp_can_spool_backlog(sk, total_moved, &skbs)) {
> + mptcp_data_unlock(sk);
> + enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
> +
> + mptcp_data_lock(sk);
> + total_moved += moved;
> + mptcp_backlog_spooled(sk, moved, &skbs);
> + }
> + mptcp_data_unlock(sk);
> + return enqueued;
> }
>
> static unsigned int mptcp_inq_hint(const struct sock *sk)
> @@ -2254,7 +2285,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
>
> copied += bytes_read;
>
> - if (skb_queue_empty(&sk->sk_receive_queue) && __mptcp_move_skbs(sk))
> + if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
> continue;
>
> /* only the MPTCP socket status is relevant here. The exit
> @@ -3521,20 +3552,22 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
>
> #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
> BIT(MPTCP_RETRANSMIT) | \
> - BIT(MPTCP_FLUSH_JOIN_LIST) | \
> - BIT(MPTCP_DEQUEUE))
> + BIT(MPTCP_FLUSH_JOIN_LIST))
>
> /* processes deferred events and flush wmem */
> static void mptcp_release_cb(struct sock *sk)
> __must_hold(&sk->sk_lock.slock)
> {
> struct mptcp_sock *msk = mptcp_sk(sk);
> + u32 moved, total_moved = 0;
>
> for (;;) {
> unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
> - struct list_head join_list;
> + struct list_head join_list, skbs;
> + bool spool_bl;
>
> - if (!flags)
> + spool_bl = mptcp_can_spool_backlog(sk, total_moved, &skbs);
> + if (!flags && !spool_bl)
> break;
>
> INIT_LIST_HEAD(&join_list);
> @@ -3550,20 +3583,26 @@ static void mptcp_release_cb(struct sock *sk)
> msk->cb_flags &= ~flags;
> spin_unlock_bh(&sk->sk_lock.slock);
>
> + if (total_moved)
> + cond_resched();
> +
> if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
> __mptcp_flush_join_list(sk, &join_list);
> if (flags & BIT(MPTCP_PUSH_PENDING))
> __mptcp_push_pending(sk, 0);
> if (flags & BIT(MPTCP_RETRANSMIT))
> __mptcp_retrans(sk);
> - if ((flags & BIT(MPTCP_DEQUEUE)) && __mptcp_move_skbs(sk)) {
> + if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
> /* notify ack seq update */
> mptcp_cleanup_rbuf(msk, 0);
> sk->sk_data_ready(sk);
> }
>
> - cond_resched();
> spin_lock_bh(&sk->sk_lock.slock);
> + if (spool_bl) {
> + total_moved += moved;
> + mptcp_backlog_spooled(sk, moved, &skbs);
> + }
> }
>
> if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
> @@ -3796,7 +3835,7 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
> return -EINVAL;
>
> lock_sock(sk);
> - if (__mptcp_move_skbs(sk))
> + if (mptcp_move_skbs(sk))
> mptcp_cleanup_rbuf(msk, 0);
> *karg = mptcp_inq_hint(sk);
> release_sock(sk);
> diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
> index d814e8151458d5..9e2a44546354a0 100644
> --- a/net/mptcp/protocol.h
> +++ b/net/mptcp/protocol.h
> @@ -124,7 +124,6 @@
> #define MPTCP_FLUSH_JOIN_LIST 5
> #define MPTCP_SYNC_STATE 6
> #define MPTCP_SYNC_SNDBUF 7
> -#define MPTCP_DEQUEUE 8
>
> struct mptcp_skb_cb {
> u64 map_seq;
> @@ -301,6 +300,7 @@ struct mptcp_sock {
> u32 last_ack_recv;
> unsigned long timer_ival;
> u32 token;
> + u32 borrowed_mem;
> unsigned long flags;
> unsigned long cb_flags;
> bool recovery; /* closing subflow write queue reinjected */
Thread overview: 23+ messages
2025-10-22 14:31 [PATCH v6 mptcp-next 00/11] mptcp: introduce backlog processing Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 01/11] mptcp: drop bogus optimization in __mptcp_check_push() Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 02/11] mptcp: borrow forward memory from subflow Paolo Abeni
2025-10-23 6:38 ` Geliang Tang
2025-10-22 14:31 ` [PATCH v6 mptcp-next 03/11] mptcp: cleanup fallback data fin reception Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 04/11] mptcp: cleanup fallback dummy mapping generation Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 05/11] mptcp: fix MSG_PEEK stream corruption Paolo Abeni
2025-10-23 16:56 ` Mat Martineau
2025-10-24 7:34 ` Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 06/11] mptcp: ensure the kernel PM does not take action too late Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 07/11] mptcp: do not miss early first subflow close event notification Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 08/11] mptcp: make mptcp_destroy_common() static Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 09/11] mptcp: drop the __mptcp_data_ready() helper Paolo Abeni
2025-10-23 6:38 ` Geliang Tang
2025-10-22 14:31 ` [PATCH v6 mptcp-next 10/11] mptcp: introduce mptcp-level backlog Paolo Abeni
2025-10-22 14:31 ` [PATCH v6 mptcp-next 11/11] mptcp: leverage the backlog for RX packet processing Paolo Abeni
2025-10-23 15:11 ` Paolo Abeni [this message]
2025-10-23 15:52 ` Matthieu Baerts
2025-10-23 17:02 ` Mat Martineau
2025-10-23 17:43 ` Matthieu Baerts
2025-10-22 15:50 ` [PATCH v6 mptcp-next 00/11] mptcp: introduce backlog processing MPTCP CI
2025-10-23 6:37 ` Geliang Tang
2025-10-27 12:17 ` Matthieu Baerts