From: Nhat Pham <nphamcs@gmail.com>
To: kasong@tencent.com
Cc: Liam.Howlett@oracle.com, akpm@linux-foundation.org,
apopple@nvidia.com, axelrasmussen@google.com, baohua@kernel.org,
baolin.wang@linux.alibaba.com, bhe@redhat.com, byungchul@sk.com,
cgroups@vger.kernel.org, chengming.zhou@linux.dev,
chrisl@kernel.org, corbet@lwn.net, david@kernel.org,
dev.jain@arm.com, gourry@gourry.net, hannes@cmpxchg.org,
hughd@google.com, jannh@google.com, joshua.hahnjy@gmail.com,
lance.yang@linux.dev, lenb@kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-pm@vger.kernel.org, lorenzo.stoakes@oracle.com,
matthew.brost@intel.com, mhocko@suse.com, muchun.song@linux.dev,
npache@redhat.com, nphamcs@gmail.com, pavel@kernel.org,
peterx@redhat.com, peterz@infradead.org, pfalcato@suse.de,
rafael@kernel.org, rakie.kim@sk.com, roman.gushchin@linux.dev,
rppt@kernel.org, ryan.roberts@arm.com, shakeel.butt@linux.dev,
shikemeng@huaweicloud.com, surenb@google.com, tglx@kernel.org,
vbabka@suse.cz, weixugc@google.com, ying.huang@linux.alibaba.com,
yosry.ahmed@linux.dev, yuanchu@google.com,
zhengqi.arch@bytedance.com, ziy@nvidia.com, kernel-team@meta.com,
riel@surriel.com, haowenchao22@gmail.com
Subject: [PATCH v6 21/22] vswap: batch contiguous vswap free calls
Date: Tue, 5 May 2026 08:38:50 -0700 [thread overview]
Message-ID: <20260505153854.1612033-22-nphamcs@gmail.com>
In-Reply-To: <20260505153854.1612033-1-nphamcs@gmail.com>
In vswap_free(), we release and reacquire the cluster lock for every
single entry, even for non-disk-swap backends where dropping the lock
is unnecessary. Batch consecutive free operations into a single
vswap_free_nr() call to avoid this overhead.
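Schematically, the batched path accumulates a run of freeable entries
and flushes it whenever the run would cross a cluster boundary, would
cross a memcg boundary, or hits an entry that is still live. The
sketch below is illustrative only and condenses the control flow of
vswap_free_nr_any_cache_only(); entry_is_freeable() is a stand-in for
the !swap_count && !in_swapcache check, not a real helper:

	free_nr = 0;
	for (i = 0; i < nr; i++, entry.val++) {
		/* a run must not cross a cluster: flush at the boundary */
		if (free_nr && !VSWAP_IDX_WITHIN_CLUSTER_VAL(entry.val)) {
			vswap_free_nr(cluster, free_start, free_nr);
			free_nr = 0;
		}
		desc = vswap_iter(&cluster, entry.val);
		if (entry_is_freeable(desc)) {
			/* a run must not cross a memcg: flush at the boundary */
			if (free_nr && swp_desc_memcgid(desc) != batch_memcgid) {
				vswap_free_nr(cluster, free_start, free_nr);
				free_nr = 0;
			}
			if (!free_nr) {
				batch_memcgid = swp_desc_memcgid(desc);
				free_start = entry;
			}
			free_nr++;
		} else if (free_nr) {
			/* run broken by a live entry: flush what we have */
			vswap_free_nr(cluster, free_start, free_nr);
			free_nr = 0;
		}
	}
	if (free_nr)
		vswap_free_nr(cluster, free_start, free_nr);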
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
mm/vswap.c | 97 ++++++++++++++++++++++++++++++++++--------------------
1 file changed, 61 insertions(+), 36 deletions(-)
diff --git a/mm/vswap.c b/mm/vswap.c
index 3f86bbb3a5ea..f07e6d9ec1df 100644
--- a/mm/vswap.c
+++ b/mm/vswap.c
@@ -529,18 +529,18 @@ static void vswap_cluster_free(struct vswap_cluster *cluster)
call_rcu(&cluster->rcu, vswap_cluster_free_rcu);
}
-static inline void release_vswap_slot(struct vswap_cluster *cluster,
- unsigned long index)
+static inline void release_vswap_slot_nr(struct vswap_cluster *cluster,
+ unsigned long index, int nr)
{
unsigned long slot_index = VSWAP_IDX_WITHIN_CLUSTER_VAL(index);
lockdep_assert_held(&cluster->lock);
- cluster->count--;
+ cluster->count -= nr;
- bitmap_clear(cluster->bitmap, slot_index, 1);
+ bitmap_clear(cluster->bitmap, slot_index, nr);
/* we only free uncached empty clusters */
- if (refcount_dec_and_test(&cluster->refcnt))
+ if (refcount_sub_and_test(nr, &cluster->refcnt))
vswap_cluster_free(cluster);
else if (cluster->full && cluster_is_alloc_candidate(cluster)) {
cluster->full = false;
@@ -553,7 +553,7 @@ static inline void release_vswap_slot(struct vswap_cluster *cluster,
}
}
- atomic_dec(&vswap_used);
+ atomic_sub(nr, &vswap_used);
}
/*
@@ -585,7 +585,7 @@ static unsigned short swp_desc_memcgid(struct swp_desc *desc);
*
* 1. Callers ensure no concurrent modification of the swap entry's internal
* state can occur. This is guaranteed by one of the following:
- * - For vswap_free() callers: the swap entry's refcnt (swap count and
+ * - For vswap_free_nr() callers: the swap entry's refcnt (swap count and
* swapcache pin) is down to 0.
* - For vswap_store_folio(), swap_zeromap_folio_set(), and zswap_entry_store()
* callers: the folio is locked and in the swap cache.
@@ -706,26 +706,17 @@ static void __vswap_swap_cgroup_clear(struct vswap_cluster *cluster,
/*
* Entered with the cluster locked. The cluster lock is held throughout.
- *
- * This is safe, because:
- *
- * 1. The swap entry to be freed has refcnt (swap count and swapcache pin)
- * down to 0, so no one can change its internal state.
- *
- * 2. The swap entry to be freed still holds a refcnt to the cluster, keeping
- * the cluster itself valid.
- *
- * 3. swap_slot_free_nr() takes the physical swap cluster lock (ci->lock),
- * but the only vswap function called under ci->lock is vswap_rmap_set(),
- * which uses atomic ops and does not take cluster->lock. So there is no
- * ABBA deadlock risk.
*/
-static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
- swp_entry_t entry)
+static void vswap_free_nr(struct vswap_cluster *cluster, swp_entry_t entry,
+ int nr)
{
- unsigned short id = swp_desc_memcgid(desc);
+ struct swp_desc *desc = __vswap_iter(cluster, entry.val);
+ unsigned short id;
struct mem_cgroup *memcg;
+ VM_WARN_ON(!desc);
+ id = swp_desc_memcgid(desc);
+
/*
* The swap_cgroup id reference taken at swapout time pins this
* memcg until swap_cgroup_clear() runs below, so we can resolve
@@ -733,11 +724,11 @@ static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
*/
memcg = id ? mem_cgroup_from_id(id) : NULL;
- release_backing(cluster, entry, 1, memcg);
- __vswap_swap_cgroup_clear(cluster, entry, 1, memcg);
+ release_backing(cluster, entry, nr, memcg);
+ __vswap_swap_cgroup_clear(cluster, entry, nr, memcg);
- /* erase forward mapping and release the virtual slot for reallocation */
- release_vswap_slot(cluster, entry.val);
+ /* erase forward mapping and release the virtual slots for reallocation */
+ release_vswap_slot_nr(cluster, entry.val, nr);
}
@@ -908,10 +899,18 @@ static bool vswap_free_nr_any_cache_only(swp_entry_t entry, int nr)
struct vswap_cluster *cluster = NULL;
struct swp_desc *desc;
bool ret = false;
- int i;
+ swp_entry_t free_start;
+ unsigned short batch_memcgid = 0;
+ int i, free_nr = 0;
+ free_start.val = 0;
rcu_read_lock();
for (i = 0; i < nr; i++) {
+ /* flush pending free batch at cluster boundary */
+ if (free_nr && !VSWAP_IDX_WITHIN_CLUSTER_VAL(entry.val)) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
desc = vswap_iter(&cluster, entry.val);
VM_WARN_ON(!desc);
ret |= (desc->swap_count == 1 && desc->in_swapcache);
@@ -919,18 +918,34 @@ static bool vswap_free_nr_any_cache_only(swp_entry_t entry, int nr)
if (!desc->swap_count && !desc->in_swapcache) {
if (xa_is_value(desc->shadow))
desc->shadow = NULL;
- vswap_free(cluster, desc, entry);
- } else if (!desc->swap_count && desc->in_swapcache &&
- desc->type == VSWAP_SWAPFILE) {
+ /* flush at cgroup boundary */
+ if (free_nr &&
+ swp_desc_memcgid(desc) != batch_memcgid) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
+ if (!free_nr)
+ batch_memcgid = swp_desc_memcgid(desc);
+ if (!free_nr++)
+ free_start = entry;
+ } else {
+ if (free_nr) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
/*
* swap_count just dropped to 0, but still in swap
* cache. If backed by a physical swap slot, mark it
* so the physical swap allocator can check cheaply.
*/
- swap_rmap_mark_cache_only(desc->slot);
+ if (!desc->swap_count && desc->in_swapcache &&
+ desc->type == VSWAP_SWAPFILE)
+ swap_rmap_mark_cache_only(desc->slot);
}
entry.val++;
}
+ if (free_nr)
+ vswap_free_nr(cluster, free_start, free_nr);
if (cluster)
spin_unlock(&cluster->lock);
rcu_read_unlock();
@@ -1032,8 +1047,9 @@ bool folio_free_swap(struct folio *folio)
VM_WARN_ON_FOLIO(!desc || desc->swap_cache != folio, folio);
desc->swap_cache = NULL;
desc->in_swapcache = false;
- vswap_free(cluster, desc, (swp_entry_t){ entry.val + i });
}
+
+ vswap_free_nr(cluster, entry, nr);
spin_unlock_irq(&cluster->lock);
rcu_read_unlock();
@@ -1095,14 +1111,23 @@ static void __swapcache_clear(struct vswap_cluster *cluster,
swp_entry_t entry, int nr)
{
struct swp_desc *desc;
- int i;
+ swp_entry_t free_start;
+ int i, free_nr = 0;
+ free_start = entry;
for (i = 0; i < nr; i++) {
desc = __vswap_iter(cluster, entry.val + i);
desc->in_swapcache = false;
- if (!desc->swap_count)
- vswap_free(cluster, desc, (swp_entry_t){ entry.val + i });
+ if (!desc->swap_count) {
+ if (!free_nr++)
+ free_start.val = entry.val + i;
+ } else if (free_nr) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
}
+ if (free_nr)
+ vswap_free_nr(cluster, free_start, free_nr);
}
void swapcache_clear(swp_entry_t entry, int nr)
--
2.52.0