From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
Jan Kara <jack@suse.cz>,
William Kucharski <william.kucharski@oracle.com>
Subject: [PATCH 46/48] truncate,shmem: Handle truncates that split large folios
Date: Wed, 8 Dec 2021 04:22:54 +0000
Message-ID: <20211208042256.1923824-47-willy@infradead.org>
In-Reply-To: <20211208042256.1923824-1-willy@infradead.org>

Handle folio splitting in the parts of the truncation functions which
already handle partial pages. Factor all that code out into a new
function called truncate_inode_partial_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
 mm/internal.h |   2 +
 mm/shmem.c    | 107 ++++++++++++++++++---------------------------
 mm/truncate.c | 118 ++++++++++++++++++++++++++++++++------------------
 3 files changed, 120 insertions(+), 107 deletions(-)
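
For reviewers, a standalone model of the endpoint arithmetic this patch
centralises (plain userspace C, not kernel code; PAGE_SIZE and the
example byte range here are made up for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
        /* Hole-punch byte range; both endpoints are inclusive. */
        unsigned long long lstart = 5000, lend = 12000;

        /* First and one-past-last page indices wholly inside the range. */
        unsigned long long start = (lstart + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long long end = (lend + 1) / PAGE_SIZE;

        /*
         * A trailing partial page exists iff lend + 1 is not
         * page-aligned -- the test used below in place of the old
         * partial_end byte offset.
         */
        int partial_end = ((lend + 1) % PAGE_SIZE) > 0;

        printf("whole pages [%llu, %llu), head partial in page %llu, tail partial: %s\n",
               start, end, lstart / PAGE_SIZE, partial_end ? "yes" : "no");
        return 0;
}

With lstart = 5000 and lend = 12000 there are no whole pages at all:
both endpoints land in partial pages (1 and 2), which is exactly the
case the new truncate_inode_partial_folio() helper must handle.
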
diff --git a/mm/internal.h b/mm/internal.h
index 7759d4ff3323..e989d8ceec91 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -119,6 +119,8 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
+bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
+ loff_t end);
/**
* folio_evictable - Test whether a folio is evictable.
diff --git a/mm/shmem.c b/mm/shmem.c
index bbfa2d05e787..7f0b07845c1f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -151,6 +151,19 @@ int shmem_getpage(struct inode *inode, pgoff_t index,
mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}
+int shmem_get_folio(struct inode *inode, pgoff_t index,
+ struct folio **foliop, enum sgp_type sgp)
+{
+ struct page *page = NULL;
+ int ret = shmem_getpage(inode, index, &page, sgp);
+
+ if (page)
+ *foliop = page_folio(page);
+ else
+ *foliop = NULL;
+ return ret;
+}
+
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
return sb->s_fs_info;
@@ -880,32 +893,6 @@ void shmem_unlock_mapping(struct address_space *mapping)
}
}
-/*
- * Check whether a hole-punch or truncation needs to split a huge page,
- * returning true if no split was required, or the split has been successful.
- *
- * Eviction (or truncation to 0 size) should never need to split a huge page;
- * but in rare cases might do so, if shmem_undo_range() failed to trylock on
- * head, and then succeeded to trylock on tail.
- *
- * A split can only succeed when there are no additional references on the
- * huge page: so the split below relies upon find_get_entries() having stopped
- * when it found a subpage of the huge page, without getting further references.
- */
-static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
-{
- if (!PageTransCompound(page))
- return true;
-
- /* Just proceed to delete a huge page wholly within the range punched */
- if (PageHead(page) &&
- page->index >= start && page->index + HPAGE_PMD_NR <= end)
- return true;
-
- /* Try to split huge page, so we can truly punch the hole or truncate */
- return split_huge_page(page) >= 0;
-}
-
/*
* Remove range of pages and swap entries from page cache, and free them.
* If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
@@ -917,13 +904,13 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
pgoff_t end = (lend + 1) >> PAGE_SHIFT;
- unsigned int partial_start = lstart & (PAGE_SIZE - 1);
- unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
struct folio_batch fbatch;
pgoff_t indices[PAGEVEC_SIZE];
+ struct folio *folio;
long nr_swaps_freed = 0;
pgoff_t index;
int i;
+ bool partial_end;
if (lend == -1)
end = -1; /* unsigned, so actually very big */
@@ -959,33 +946,34 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
index++;
}
- if (partial_start) {
- struct page *page = NULL;
- shmem_getpage(inode, start - 1, &page, SGP_READ);
- if (page) {
- unsigned int top = PAGE_SIZE;
- if (start > end) {
- top = partial_end;
- partial_end = 0;
- }
- zero_user_segment(page, partial_start, top);
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
+ partial_end = ((lend + 1) % PAGE_SIZE) > 0;
+ shmem_get_folio(inode, lstart >> PAGE_SHIFT, &folio, SGP_READ);
+ if (folio) {
+ bool same_page;
+
+ same_page = lend < folio_pos(folio) + folio_size(folio);
+ if (same_page)
+ partial_end = false;
+ folio_mark_dirty(folio);
+ if (!truncate_inode_partial_folio(folio, lstart, lend)) {
+ start = folio->index + folio_nr_pages(folio);
+ if (same_page)
+ end = folio->index;
}
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
}
- if (partial_end) {
- struct page *page = NULL;
- shmem_getpage(inode, end, &page, SGP_READ);
- if (page) {
- zero_user_segment(page, 0, partial_end);
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
- }
+
+ if (partial_end)
+ shmem_get_folio(inode, end, &folio, SGP_READ);
+ if (folio) {
+ folio_mark_dirty(folio);
+ if (!truncate_inode_partial_folio(folio, lstart, lend))
+ end = folio->index;
+ folio_unlock(folio);
+ folio_put(folio);
}
- if (start >= end)
- return;
index = start;
while (index < end) {
@@ -1019,8 +1007,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
folio_lock(folio);
if (!unfalloc || !folio_test_uptodate(folio)) {
- struct page *page = folio_file_page(folio,
- index);
if (folio_mapping(folio) != mapping) {
/* Page was replaced by swap: retry */
folio_unlock(folio);
@@ -1029,18 +1015,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
}
VM_BUG_ON_FOLIO(folio_test_writeback(folio),
folio);
- if (shmem_punch_compound(page, start, end))
- truncate_inode_folio(mapping, folio);
- else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- /* Wipe the page and don't get stuck */
- clear_highpage(page);
- flush_dcache_page(page);
- folio_mark_dirty(folio);
- if (index <
- round_up(start, HPAGE_PMD_NR))
- start = index + 1;
- }
+ truncate_inode_folio(mapping, folio);
}
+ index = folio->index + folio_nr_pages(folio) - 1;
folio_unlock(folio);
}
folio_batch_remove_exceptionals(&fbatch);
diff --git a/mm/truncate.c b/mm/truncate.c
index 2d1dae085acb..336c8d099efa 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -228,6 +228,58 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
return 0;
}
+/*
+ * Handle partial folios. The folio may be entirely within the
+ * range if a split has raced with us. If not, we zero the part of the
+ * folio that's within the [start, end] range, and then split the folio if
+ * it's large. split_huge_page() will discard pages which now lie beyond
+ * i_size, and we rely on the caller to discard pages which lie within a
+ * newly created hole.
+ *
+ * Returns false if splitting failed so the caller can avoid
+ * discarding the entire folio which is stubbornly unsplit.
+ */
+bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
+{
+ loff_t pos = folio_pos(folio);
+ unsigned int offset, length;
+
+ if (pos < start)
+ offset = start - pos;
+ else
+ offset = 0;
+ length = folio_size(folio);
+ if (pos + length <= (u64)end)
+ length = length - offset;
+ else
+ length = end + 1 - pos - offset;
+
+ folio_wait_writeback(folio);
+ if (length == folio_size(folio)) {
+ truncate_inode_folio(folio->mapping, folio);
+ return true;
+ }
+
+ /*
+ * We may be zeroing pages we're about to discard, but it avoids
+ * doing a complex calculation here, and then doing the zeroing
+ * anyway if the page split fails.
+ */
+ folio_zero_range(folio, offset, length);
+
+ cleancache_invalidate_page(folio->mapping, &folio->page);
+ if (folio_has_private(folio))
+ do_invalidatepage(&folio->page, offset, length);
+ if (!folio_test_large(folio))
+ return true;
+ if (split_huge_page(&folio->page) == 0)
+ return true;
+ if (folio_test_dirty(folio))
+ return false;
+ truncate_inode_folio(folio->mapping, folio);
+ return true;
+}
+
/*
* Used to get rid of pages on hardware memory corruption.
*/
@@ -294,20 +346,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
{
pgoff_t start; /* inclusive */
pgoff_t end; /* exclusive */
- unsigned int partial_start; /* inclusive */
- unsigned int partial_end; /* exclusive */
struct folio_batch fbatch;
pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index;
int i;
+ struct folio *folio;
+ bool partial_end;
if (mapping_empty(mapping))
goto out;
- /* Offsets within partial pages */
- partial_start = lstart & (PAGE_SIZE - 1);
- partial_end = (lend + 1) & (PAGE_SIZE - 1);
-
/*
* 'start' and 'end' always covers the range of pages to be fully
* truncated. Partial pages are covered with 'partial_start' at the
@@ -340,47 +388,33 @@ void truncate_inode_pages_range(struct address_space *mapping,
cond_resched();
}
- if (partial_start) {
- struct page *page = find_lock_page(mapping, start - 1);
- if (page) {
- unsigned int top = PAGE_SIZE;
- if (start > end) {
- /* Truncation within a single page */
- top = partial_end;
- partial_end = 0;
- }
- wait_on_page_writeback(page);
- zero_user_segment(page, partial_start, top);
- cleancache_invalidate_page(mapping, page);
- if (page_has_private(page))
- do_invalidatepage(page, partial_start,
- top - partial_start);
- unlock_page(page);
- put_page(page);
+ partial_end = ((lend + 1) % PAGE_SIZE) > 0;
+ folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
+ if (folio) {
+ bool same_folio = lend < folio_pos(folio) + folio_size(folio);
+ if (same_folio)
+ partial_end = false;
+ if (!truncate_inode_partial_folio(folio, lstart, lend)) {
+ start = folio->index + folio_nr_pages(folio);
+ if (same_folio)
+ end = folio->index;
}
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
}
- if (partial_end) {
- struct page *page = find_lock_page(mapping, end);
- if (page) {
- wait_on_page_writeback(page);
- zero_user_segment(page, 0, partial_end);
- cleancache_invalidate_page(mapping, page);
- if (page_has_private(page))
- do_invalidatepage(page, 0,
- partial_end);
- unlock_page(page);
- put_page(page);
- }
+
+ if (partial_end)
+ folio = __filemap_get_folio(mapping, end, FGP_LOCK, 0);
+ if (folio) {
+ if (!truncate_inode_partial_folio(folio, lstart, lend))
+ end = folio->index;
+ folio_unlock(folio);
+ folio_put(folio);
}
- /*
- * If the truncation happened within a single page no pages
- * will be released, just zeroed, so we can bail out now.
- */
- if (start >= end)
- goto out;
index = start;
- for ( ; ; ) {
+ while (index < end) {
cond_resched();
if (!find_get_entries(mapping, index, end - 1, &fbatch,
indices)) {
--
2.33.0
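
For completeness, the offset/length computation inside
truncate_inode_partial_folio() modelled as standalone C (a sketch only:
pos and size stand in for folio_pos()/folio_size(), and the sample
folio and range are invented):

#include <stdio.h>

int main(void)
{
        /* A 16KiB folio starting at byte 16384 ... */
        long long pos = 16384, size = 16384;
        /* ... being hole-punched over [start, end], both inclusive. */
        long long start = 20000, end = 28000;

        /* Bytes of the folio that precede the punched range. */
        long long offset = pos < start ? start - pos : 0;

        /* Bytes of the folio that fall inside the punched range. */
        long long length;
        if (pos + size <= end)          /* folio ends inside the range */
                length = size - offset;
        else                            /* range ends inside the folio */
                length = end + 1 - pos - offset;

        /*
         * length == size means the whole folio is covered and can be
         * truncated outright; anything smaller is zeroed, and the folio
         * is then split if it is large.
         */
        printf("zero %lld bytes at offset %lld within the folio\n",
               length, offset);
        return 0;
}

Here offset is 3616 and length is 8001: bytes 20000-28000 of the file
are zeroed in place before the split is attempted.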