From: Nhat Pham <nphamcs@gmail.com>
To: kasong@tencent.com
Cc: Liam.Howlett@oracle.com, akpm@linux-foundation.org,
apopple@nvidia.com, axelrasmussen@google.com, baohua@kernel.org,
baolin.wang@linux.alibaba.com, bhe@redhat.com, byungchul@sk.com,
cgroups@vger.kernel.org, chengming.zhou@linux.dev,
chrisl@kernel.org, corbet@lwn.net, david@kernel.org,
dev.jain@arm.com, gourry@gourry.net, hannes@cmpxchg.org,
hughd@google.com, jannh@google.com, joshua.hahnjy@gmail.com,
lance.yang@linux.dev, lenb@kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-pm@vger.kernel.org, lorenzo.stoakes@oracle.com,
matthew.brost@intel.com, mhocko@suse.com, muchun.song@linux.dev,
npache@redhat.com, nphamcs@gmail.com, pavel@kernel.org,
peterx@redhat.com, peterz@infradead.org, pfalcato@suse.de,
rafael@kernel.org, rakie.kim@sk.com, roman.gushchin@linux.dev,
rppt@kernel.org, ryan.roberts@arm.com, shakeel.butt@linux.dev,
shikemeng@huaweicloud.com, surenb@google.com, tglx@kernel.org,
vbabka@suse.cz, weixugc@google.com, ying.huang@linux.alibaba.com,
yosry.ahmed@linux.dev, yuanchu@google.com,
zhengqi.arch@bytedance.com, ziy@nvidia.com, kernel-team@meta.com,
riel@surriel.com
Subject: [PATCH v5 21/21] vswap: batch contiguous vswap free calls
Date: Fri, 20 Mar 2026 12:27:35 -0700
Message-ID: <20260320192735.748051-22-nphamcs@gmail.com>
In-Reply-To: <20260320192735.748051-1-nphamcs@gmail.com>
In vswap_free(), we release and reacquire the cluster lock for every
single entry, even for non-disk-swap backends where the lock drop is
unnecessary. Batch consecutive free operations to avoid this overhead.
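
Concretely, the freeing loops now accumulate runs of consecutively
freeable entries and release each run with a single vswap_free_nr()
call. A minimal sketch of the pattern (illustrative only: can_free()
is a stand-in for the caller-specific condition, e.g. zero swap count
and no swapcache pin, and the cluster lookup via vswap_iter() is
elided; the real loops are in vswap_free_nr_any_cache_only() and
swapcache_clear()):

	swp_entry_t free_start = { .val = 0 };
	int i, free_nr = 0;

	for (i = 0; i < nr; i++) {
		/* never let a batch straddle a cluster boundary */
		if (free_nr && !VSWAP_IDX_WITHIN_CLUSTER_VAL(entry.val)) {
			vswap_free_nr(cluster, free_start, free_nr);
			free_nr = 0;
		}
		if (can_free(entry)) {
			if (!free_nr++)
				free_start = entry;	/* open a new batch */
		} else if (free_nr) {
			/* run broken by a live entry: flush the batch */
			vswap_free_nr(cluster, free_start, free_nr);
			free_nr = 0;
		}
		entry.val++;
	}
	if (free_nr)	/* flush the trailing batch */
		vswap_free_nr(cluster, free_start, free_nr);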
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
include/linux/memcontrol.h | 6 ++
mm/memcontrol.c | 2 +-
mm/vswap.c | 185 ++++++++++++++++++++++++-------------
3 files changed, 126 insertions(+), 67 deletions(-)
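
A note on the locking contract for reviewers: release_backing() (and
hence vswap_free_nr()) is now entered and exited with the cluster lock
held, but may transiently drop it around swap_slot_free_nr(). A
caller-side sketch under the assumptions stated in the comment below
(no concurrent modification of the entries, and the entries' refcounts
keeping the cluster valid across the lock drop):

	spin_lock(&cluster->lock);
	...
	/* may unlock and relock cluster->lock internally */
	release_backing(cluster, entry, nr);
	/* lock is held again here; cluster kept valid by entry refcnts */
	...
	spin_unlock(&cluster->lock);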
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0651865a4564f..0f7f5489e1675 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -827,6 +827,7 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
@@ -1289,6 +1290,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+static inline void mem_cgroup_id_put_many(struct mem_cgroup *memcg,
+ unsigned int n)
+{
+}
+
#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4525c21754e7f..c6d307b8127a8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3597,7 +3597,7 @@ void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
refcount_add(n, &memcg->id.ref);
}
-static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
if (refcount_sub_and_test(n, &memcg->id.ref)) {
mem_cgroup_id_remove(memcg);
diff --git a/mm/vswap.c b/mm/vswap.c
index fa37165cb10d0..82092502130a6 100644
--- a/mm/vswap.c
+++ b/mm/vswap.c
@@ -482,18 +482,18 @@ static void vswap_cluster_free(struct vswap_cluster *cluster)
kvfree_rcu(cluster, rcu);
}
-static inline void release_vswap_slot(struct vswap_cluster *cluster,
- unsigned long index)
+static inline void release_vswap_slot_nr(struct vswap_cluster *cluster,
+ unsigned long index, int nr)
{
unsigned long slot_index = VSWAP_IDX_WITHIN_CLUSTER_VAL(index);
lockdep_assert_held(&cluster->lock);
- cluster->count--;
+ cluster->count -= nr;
- bitmap_clear(cluster->bitmap, slot_index, 1);
+ bitmap_clear(cluster->bitmap, slot_index, nr);
/* we only free uncached empty clusters */
- if (refcount_dec_and_test(&cluster->refcnt))
+ if (refcount_sub_and_test(nr, &cluster->refcnt))
vswap_cluster_free(cluster);
else if (cluster->full && cluster_is_alloc_candidate(cluster)) {
cluster->full = false;
@@ -506,7 +506,7 @@ static inline void release_vswap_slot(struct vswap_cluster *cluster,
}
}
- atomic_dec(&vswap_used);
+ atomic_sub(nr, &vswap_used);
}
/*
@@ -528,23 +528,33 @@ void vswap_rmap_set(struct swap_cluster_info *ci, swp_slot_t slot,
}
/*
- * Caller needs to handle races with other operations themselves.
+ * release_backing - release the backend storage for a given range of virtual
+ * swap slots.
*
- * Specifically, this function is safe to be called in contexts where the swap
- * entry has been added to the swap cache and the associated folio is locked.
- * We cannot race with other accessors, and the swap entry is guaranteed to be
- * valid the whole time (since swap cache implies one refcount).
+ * Entered with the cluster locked; the lock may be transiently dropped.
+ * This is because several operations, such as releasing physical swap slots
+ * (i.e. swap_slot_free_nr()), require the cluster to be unlocked to avoid
+ * deadlocks.
*
- * We cannot assume that the backends will be of the same type,
- * contiguous, etc. We might have a large folio coalesced from subpages with
- * mixed backend, which is only rectified when it is reclaimed.
+ * This is safe, because:
+ *
+ * 1. Callers ensure no concurrent modification of the swap entry's internal
+ * state can occur. This is guaranteed by one of the following:
+ * - For vswap_free_nr() callers: the swap entry's refcnt (swap count and
+ * swapcache pin) is down to 0.
+ * - For vswap_store_folio(), swap_zeromap_folio_set(), and zswap_entry_store()
+ * callers: the folio is locked and in the swap cache.
+ *
+ * 2. The swap entry still holds a refcnt to the cluster, keeping the cluster
+ * itself valid.
+ *
+ * We will exit the function with the cluster re-locked.
*/
-static void release_backing(swp_entry_t entry, int nr)
+static void release_backing(struct vswap_cluster *cluster, swp_entry_t entry,
+ int nr)
{
- struct vswap_cluster *cluster = NULL;
struct swp_desc *desc;
unsigned long flush_nr, phys_swap_start = 0, phys_swap_end = 0;
- unsigned long phys_swap_released = 0;
unsigned int phys_swap_type = 0;
bool need_flushing_phys_swap = false;
swp_slot_t flush_slot;
@@ -552,9 +562,8 @@ static void release_backing(swp_entry_t entry, int nr)
VM_WARN_ON(!entry.val);
- rcu_read_lock();
for (i = 0; i < nr; i++) {
- desc = vswap_iter(&cluster, entry.val + i);
+ desc = __vswap_iter(cluster, entry.val + i);
VM_WARN_ON(!desc);
/*
@@ -574,7 +583,6 @@ static void release_backing(swp_entry_t entry, int nr)
if (desc->type == VSWAP_ZSWAP && desc->zswap_entry) {
zswap_entry_free(desc->zswap_entry);
} else if (desc->type == VSWAP_SWAPFILE) {
- phys_swap_released++;
if (!phys_swap_start) {
/* start a new contiguous range of phys swap */
phys_swap_start = swp_slot_offset(desc->slot);
@@ -590,56 +598,49 @@ static void release_backing(swp_entry_t entry, int nr)
if (need_flushing_phys_swap) {
spin_unlock(&cluster->lock);
- cluster = NULL;
swap_slot_free_nr(flush_slot, flush_nr);
+ mem_cgroup_uncharge_swap(entry, flush_nr);
+ spin_lock(&cluster->lock);
need_flushing_phys_swap = false;
}
}
- if (cluster)
- spin_unlock(&cluster->lock);
- rcu_read_unlock();
/* Flush any remaining physical swap range */
if (phys_swap_start) {
flush_slot = swp_slot(phys_swap_type, phys_swap_start);
flush_nr = phys_swap_end - phys_swap_start;
+ spin_unlock(&cluster->lock);
swap_slot_free_nr(flush_slot, flush_nr);
+ mem_cgroup_uncharge_swap(entry, flush_nr);
+ spin_lock(&cluster->lock);
}
-
- if (phys_swap_released)
- mem_cgroup_uncharge_swap(entry, phys_swap_released);
}
+static void __vswap_swap_cgroup_clear(struct vswap_cluster *cluster,
+ swp_entry_t entry, unsigned int nr_ents);
+
/*
- * Entered with the cluster locked, but might unlock the cluster.
- * This is because several operations, such as releasing physical swap slots
- * (i.e swap_slot_free_nr()) require the cluster to be unlocked to avoid
- * deadlocks.
- *
- * This is safe, because:
- *
- * 1. The swap entry to be freed has refcnt (swap count and swapcache pin)
- * down to 0, so no one can change its internal state
- *
- * 2. The swap entry to be freed still holds a refcnt to the cluster, keeping
- * the cluster itself valid.
- *
- * We will exit the function with the cluster re-locked.
+ * Entered with the cluster locked. We will exit the function with the cluster
+ * still locked.
*/
-static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
- swp_entry_t entry)
+static void vswap_free_nr(struct vswap_cluster *cluster, swp_entry_t entry,
+ int nr)
{
- /* Clear shadow if present */
- if (xa_is_value(desc->shadow))
- desc->shadow = NULL;
- spin_unlock(&cluster->lock);
+ struct swp_desc *desc;
+ int i;
- release_backing(entry, 1);
- mem_cgroup_clear_swap(entry, 1);
+ for (i = 0; i < nr; i++) {
+ desc = __vswap_iter(cluster, entry.val + i);
+ /* Clear shadow if present */
+ if (xa_is_value(desc->shadow))
+ desc->shadow = NULL;
+ }
- /* erase forward mapping and release the virtual slot for reallocation */
- spin_lock(&cluster->lock);
- release_vswap_slot(cluster, entry.val);
+ release_backing(cluster, entry, nr);
+ __vswap_swap_cgroup_clear(cluster, entry, nr);
+
+ /* erase forward mapping and release the virtual slots for reallocation */
+ release_vswap_slot_nr(cluster, entry.val, nr);
}
/**
@@ -818,18 +819,32 @@ static bool vswap_free_nr_any_cache_only(swp_entry_t entry, int nr)
struct vswap_cluster *cluster = NULL;
struct swp_desc *desc;
bool ret = false;
- int i;
+ swp_entry_t free_start;
+ int i, free_nr = 0;
+ free_start.val = 0;
rcu_read_lock();
for (i = 0; i < nr; i++) {
+ /* flush pending free batch at cluster boundary */
+ if (free_nr && !VSWAP_IDX_WITHIN_CLUSTER_VAL(entry.val)) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
desc = vswap_iter(&cluster, entry.val);
VM_WARN_ON(!desc);
ret |= (desc->swap_count == 1 && desc->in_swapcache);
desc->swap_count--;
- if (!desc->swap_count && !desc->in_swapcache)
- vswap_free(cluster, desc, entry);
+ if (!desc->swap_count && !desc->in_swapcache) {
+ if (!free_nr++)
+ free_start = entry;
+ } else if (free_nr) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
entry.val++;
}
+ if (free_nr)
+ vswap_free_nr(cluster, free_start, free_nr);
if (cluster)
spin_unlock(&cluster->lock);
rcu_read_unlock();
@@ -952,19 +967,33 @@ void swapcache_clear(swp_entry_t entry, int nr)
{
struct vswap_cluster *cluster = NULL;
struct swp_desc *desc;
- int i;
+ swp_entry_t free_start;
+ int i, free_nr = 0;
if (!nr)
return;
+ free_start.val = 0;
rcu_read_lock();
for (i = 0; i < nr; i++) {
+ /* flush pending free batch at cluster boundary */
+ if (free_nr && !VSWAP_IDX_WITHIN_CLUSTER_VAL(entry.val)) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
desc = vswap_iter(&cluster, entry.val);
desc->in_swapcache = false;
- if (!desc->swap_count)
- vswap_free(cluster, desc, entry);
+ if (!desc->swap_count) {
+ if (!free_nr++)
+ free_start = entry;
+ } else if (free_nr) {
+ vswap_free_nr(cluster, free_start, free_nr);
+ free_nr = 0;
+ }
entry.val++;
}
+ if (free_nr)
+ vswap_free_nr(cluster, free_start, free_nr);
if (cluster)
spin_unlock(&cluster->lock);
rcu_read_unlock();
@@ -1105,11 +1134,13 @@ void vswap_store_folio(swp_entry_t entry, struct folio *folio)
VM_BUG_ON(!folio_test_locked(folio));
VM_BUG_ON(folio->swap.val != entry.val);
- release_backing(entry, nr);
-
rcu_read_lock();
+ desc = vswap_iter(&cluster, entry.val);
+ VM_WARN_ON(!desc);
+ release_backing(cluster, entry, nr);
+
for (i = 0; i < nr; i++) {
- desc = vswap_iter(&cluster, entry.val + i);
+ desc = __vswap_iter(cluster, entry.val + i);
VM_WARN_ON(!desc);
desc->type = VSWAP_FOLIO;
desc->swap_cache = folio;
@@ -1134,11 +1165,13 @@ void swap_zeromap_folio_set(struct folio *folio)
VM_BUG_ON(!folio_test_locked(folio));
VM_BUG_ON(!entry.val);
- release_backing(entry, nr);
-
rcu_read_lock();
+ desc = vswap_iter(&cluster, entry.val);
+ VM_WARN_ON(!desc);
+ release_backing(cluster, entry, nr);
+
for (i = 0; i < nr; i++) {
- desc = vswap_iter(&cluster, entry.val + i);
+ desc = __vswap_iter(cluster, entry.val + i);
VM_WARN_ON(!desc);
desc->type = VSWAP_ZERO;
}
@@ -1772,11 +1805,10 @@ void zswap_entry_store(swp_entry_t swpentry, struct zswap_entry *entry)
struct vswap_cluster *cluster = NULL;
struct swp_desc *desc;
- release_backing(swpentry, 1);
-
rcu_read_lock();
desc = vswap_iter(&cluster, swpentry.val);
VM_WARN_ON(!desc);
+ release_backing(cluster, swpentry, 1);
desc->zswap_entry = entry;
desc->type = VSWAP_ZSWAP;
spin_unlock(&cluster->lock);
@@ -1850,6 +1882,22 @@ static unsigned short __vswap_cgroup_record(struct vswap_cluster *cluster,
return oldid;
}
+/*
+ * Clear swap cgroup for a range of swap entries.
+ * Entered with the cluster locked. Caller must be under rcu_read_lock().
+ */
+static void __vswap_swap_cgroup_clear(struct vswap_cluster *cluster,
+ swp_entry_t entry, unsigned int nr_ents)
+{
+ unsigned short id;
+ struct mem_cgroup *memcg;
+
+ id = __vswap_cgroup_record(cluster, entry, 0, nr_ents);
+ memcg = mem_cgroup_from_id(id);
+ if (memcg)
+ mem_cgroup_id_put_many(memcg, nr_ents);
+}
+
static unsigned short vswap_cgroup_record(swp_entry_t entry,
unsigned short memcgid, unsigned int nr_ents)
{
@@ -1955,6 +2003,11 @@ unsigned short lookup_swap_cgroup_id(swp_entry_t entry)
rcu_read_unlock();
return ret;
}
+#else /* !CONFIG_MEMCG */
+static void __vswap_swap_cgroup_clear(struct vswap_cluster *cluster,
+ swp_entry_t entry, unsigned int nr_ents)
+{
+}
#endif /* CONFIG_MEMCG */
int vswap_init(void)
--
2.52.0