From: Christoph Hellwig <hch@lst.de>
Cc: baoquan.he@linux.dev, akpm@linux-foundation.org,
chrisl@kernel.org, usama.arif@linux.dev, kasong@tencent.com,
nphamcs@gmail.com, shikemeng@huaweicloud.com,
youngjun.park@lge.com, linux-mm@kvack.org
Subject: [PATCH 4/6] mm/swap: also use struct swap_iocb for block I/O
Date: Fri, 15 May 2026 14:00:09 +0200 [thread overview]
Message-ID: <20260515120019.4015143-5-hch@lst.de> (raw)
In-Reply-To: <20260515120019.4015143-1-hch@lst.de>
Block I/O benefits from batching just as much as remote file systems.
Extend struct swap_iocb to support building a bio on the fly as well,
and rewrite the block-based swap code for it. This especially benefits
submit_bio based drivers that do not have the block plugging available,
but also saves allocating extra bios for blk-mq drivers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
mm/page_io.c | 506 +++++++++++++++++++++++---------------------------
mm/swap.h | 1 +
mm/swapfile.c | 9 +-
3 files changed, 235 insertions(+), 281 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index a78efc9909c8..bbd8cf47d20d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -27,54 +27,6 @@
#include <linux/zswap.h>
#include "swap.h"
-static void __end_swap_bio_write(struct bio *bio)
-{
- struct folio *folio = bio_first_folio_all(bio);
-
- if (bio->bi_status) {
- /*
- * We failed to write the page out to swap-space.
- * Re-dirty the page in order to avoid it being reclaimed.
- * Also print a dire warning that things will go BAD (tm)
- * very quickly.
- *
- * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
- */
- folio_mark_dirty(folio);
- pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
- MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
- (unsigned long long)bio->bi_iter.bi_sector);
- folio_clear_reclaim(folio);
- }
- folio_end_writeback(folio);
-}
-
-static void end_swap_bio_write(struct bio *bio)
-{
- __end_swap_bio_write(bio);
- bio_put(bio);
-}
-
-static void __end_swap_bio_read(struct bio *bio)
-{
- struct folio *folio = bio_first_folio_all(bio);
-
- if (bio->bi_status) {
- pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
- MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
- (unsigned long long)bio->bi_iter.bi_sector);
- } else {
- folio_mark_uptodate(folio);
- }
- folio_unlock(folio);
-}
-
-static void end_swap_bio_read(struct bio *bio)
-{
- __end_swap_bio_read(bio);
- bio_put(bio);
-}
-
int generic_swapfile_activate(struct swap_info_struct *sis,
struct file *swap_file,
sector_t *span)
@@ -325,9 +277,12 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
struct swap_iocb {
- struct kiocb iocb;
+ union {
+ struct kiocb iocb;
+ struct bio bio;
+ };
struct bio_vec bvec[SWAP_CLUSTER_MAX];
- int pages;
+ int nr_vecs;
int len;
};
static mempool_t *sio_pool;
@@ -345,172 +300,68 @@ int sio_pool_init(void)
return 0;
}
-static void sio_write_complete(struct kiocb *iocb, long ret)
+static bool swap_can_merge(struct swap_io_ctx *ctx, struct folio *folio)
{
- struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
- struct page *page = sio->bvec[0].bv_page;
- int p;
+ struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
+ struct bio_vec *last_bv = &ctx->sio->bvec[ctx->sio->nr_vecs - 1];
+ struct folio *prev_folio = page_folio(last_bv->bv_page);
+ size_t prev_folio_size = folio_size(prev_folio);
- if (ret != sio->len) {
- /*
- * In the case of swap-over-nfs, this can be a
- * temporary failure if the system has limited
- * memory for allocating transmit buffers.
- * Mark the page dirty and avoid
- * folio_rotate_reclaimable but rate-limit the
- * messages.
- */
- pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
- ret, swap_dev_pos(page_swap_entry(page)));
- for (p = 0; p < sio->pages; p++) {
- page = sio->bvec[p].bv_page;
- set_page_dirty(page);
- ClearPageReclaim(page);
- }
- }
+ if (ctx->sis != sis)
+ return false;
- for (p = 0; p < sio->pages; p++)
- end_page_writeback(sio->bvec[p].bv_page);
+ if (sis->flags & SWP_FS_OPS) {
+ if (swap_dev_pos(folio->swap) !=
+ swap_dev_pos(prev_folio->swap) + prev_folio_size)
+ return false;
+ } else {
+ if (swap_folio_sector(folio) !=
+ swap_folio_sector(prev_folio) +
+ (prev_folio_size >> SECTOR_SHIFT))
+ return false;
+ }
- mempool_free(sio, sio_pool);
+ return true;
}
-static void swap_writepage_fs(struct swap_io_ctx *ctx, struct folio *folio)
+static void swap_add_page(struct swap_io_ctx *ctx, struct folio *folio, int rw)
{
- struct swap_iocb *sio = ctx->sio;
struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
- struct file *swap_file = sis->swap_file;
- loff_t pos = swap_dev_pos(folio->swap);
+ struct swap_iocb *sio = ctx->sio;
- count_swpout_vm_event(folio);
- folio_start_writeback(folio);
- folio_unlock(folio);
- if (sio) {
- if (sio->iocb.ki_filp != swap_file ||
- sio->iocb.ki_pos + sio->len != pos) {
+ if (sio && !swap_can_merge(ctx, folio)) {
+ if (rw == WRITE)
swap_write_submit(ctx);
- sio = NULL;
- }
+ else
+ swap_read_submit(ctx);
+ sio = ctx->sio;
}
+
if (!sio) {
- sio = mempool_alloc(sio_pool, GFP_NOIO);
- init_sync_kiocb(&sio->iocb, swap_file);
- sio->iocb.ki_complete = sio_write_complete;
- sio->iocb.ki_pos = pos;
- sio->pages = 0;
+ ctx->sis = sis;
+ ctx->sio = sio = mempool_alloc(sio_pool, GFP_NOIO);
+ sio->nr_vecs = 0;
sio->len = 0;
}
- bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+ bvec_set_folio(&sio->bvec[sio->nr_vecs], folio, folio_size(folio), 0);
sio->len += folio_size(folio);
- sio->pages += 1;
- if (sio->pages == ARRAY_SIZE(sio->bvec)) {
- swap_write_submit(ctx);
- sio = NULL;
+ sio->nr_vecs += 1;
+ if (sio->nr_vecs == ARRAY_SIZE(sio->bvec)) {
+ if (rw == WRITE)
+ swap_write_submit(ctx);
+ else
+ swap_read_submit(ctx);
}
- ctx->sio = sio;
}
-static void swap_writepage_bdev_sync(struct folio *folio,
- struct swap_info_struct *sis)
-{
- struct bio_vec bv;
- struct bio bio;
-
- bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP);
- bio.bi_iter.bi_sector = swap_folio_sector(folio);
- bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
-
- bio_associate_blkg_from_page(&bio, folio);
- count_swpout_vm_event(folio);
-
- folio_start_writeback(folio);
- folio_unlock(folio);
-
- submit_bio_wait(&bio);
- __end_swap_bio_write(&bio);
-}
-
-static void swap_writepage_bdev_async(struct folio *folio,
- struct swap_info_struct *sis)
+void __swap_writepage(struct swap_io_ctx *ctx, struct folio *folio)
{
- struct bio *bio;
-
- bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO);
- bio->bi_iter.bi_sector = swap_folio_sector(folio);
- bio->bi_end_io = end_swap_bio_write;
- bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
+ VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
- bio_associate_blkg_from_page(bio, folio);
count_swpout_vm_event(folio);
folio_start_writeback(folio);
folio_unlock(folio);
- submit_bio(bio);
-}
-
-void __swap_writepage(struct swap_io_ctx *ctx, struct folio *folio)
-{
- struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
-
- VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
- /*
- * ->flags can be updated non-atomically,
- * but that will never affect SWP_FS_OPS, so the data_race
- * is safe.
- */
- if (data_race(sis->flags & SWP_FS_OPS))
- swap_writepage_fs(ctx, folio);
- /*
- * ->flags can be updated non-atomically,
- * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
- * is safe.
- */
- else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
- swap_writepage_bdev_sync(folio, sis);
- else
- swap_writepage_bdev_async(folio, sis);
-}
-
-void swap_write_submit(struct swap_io_ctx *ctx)
-{
- struct iov_iter from;
- struct swap_iocb *sio = ctx->sio;
- struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
- int ret;
-
- if (!ctx)
- return;
-
- iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
- ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
- if (ret != -EIOCBQUEUED)
- sio_write_complete(&sio->iocb, ret);
- ctx->sio = NULL;
-}
-
-static void sio_read_complete(struct kiocb *iocb, long ret)
-{
- struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
- int p;
-
- if (ret == sio->len) {
- for (p = 0; p < sio->pages; p++) {
- struct folio *folio = page_folio(sio->bvec[p].bv_page);
-
- count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
- count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
- folio_mark_uptodate(folio);
- folio_unlock(folio);
- }
- count_vm_events(PSWPIN, sio->len >> PAGE_SHIFT);
- } else {
- for (p = 0; p < sio->pages; p++) {
- struct folio *folio = page_folio(sio->bvec[p].bv_page);
-
- folio_unlock(folio);
- }
- pr_alert_ratelimited("Read-error on swap-device\n");
- }
- mempool_free(sio, sio_pool);
+ swap_add_page(ctx, folio, WRITE);
}
static bool swap_read_folio_zeromap(struct folio *folio)
@@ -543,74 +394,6 @@ static bool swap_read_folio_zeromap(struct folio *folio)
return true;
}
-static void swap_read_folio_fs(struct swap_io_ctx *ctx, struct folio *folio)
-{
- struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
- struct swap_iocb *sio = ctx->sio;
- loff_t pos = swap_dev_pos(folio->swap);
-
- if (sio) {
- if (sio->iocb.ki_filp != sis->swap_file ||
- sio->iocb.ki_pos + sio->len != pos) {
- swap_read_submit(ctx);
- sio = NULL;
- }
- }
- if (!sio) {
- sio = mempool_alloc(sio_pool, GFP_KERNEL);
- init_sync_kiocb(&sio->iocb, sis->swap_file);
- sio->iocb.ki_pos = pos;
- sio->iocb.ki_complete = sio_read_complete;
- sio->pages = 0;
- sio->len = 0;
- }
- bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
- sio->len += folio_size(folio);
- sio->pages += 1;
- if (sio->pages == ARRAY_SIZE(sio->bvec)) {
- swap_read_submit(ctx);
- sio = NULL;
- }
- ctx->sio = sio;
-}
-
-static void swap_read_folio_bdev_sync(struct folio *folio,
- struct swap_info_struct *sis)
-{
- struct bio_vec bv;
- struct bio bio;
-
- bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
- bio.bi_iter.bi_sector = swap_folio_sector(folio);
- bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
- /*
- * Keep this task valid during swap readpage because the oom killer may
- * attempt to access it in the page fault retry time check.
- */
- get_task_struct(current);
- count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
- count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
- count_vm_events(PSWPIN, folio_nr_pages(folio));
- submit_bio_wait(&bio);
- __end_swap_bio_read(&bio);
- put_task_struct(current);
-}
-
-static void swap_read_folio_bdev_async(struct folio *folio,
- struct swap_info_struct *sis)
-{
- struct bio *bio;
-
- bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
- bio->bi_iter.bi_sector = swap_folio_sector(folio);
- bio->bi_end_io = end_swap_bio_read;
- bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
- count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
- count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
- count_vm_events(PSWPIN, folio_nr_pages(folio));
- submit_bio(bio);
-}
-
void swap_read_folio(struct swap_io_ctx *ctx, struct folio *folio)
{
struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
@@ -644,14 +427,7 @@ void swap_read_folio(struct swap_io_ctx *ctx, struct folio *folio)
/* We have to read from slower devices. Increase zswap protection. */
zswap_folio_swapin(folio);
-
- if (data_race(sis->flags & SWP_FS_OPS)) {
- swap_read_folio_fs(ctx, folio);
- } else if (synchronous) {
- swap_read_folio_bdev_sync(folio, sis);
- } else {
- swap_read_folio_bdev_async(folio, sis);
- }
+ swap_add_page(ctx, folio, READ);
finish:
if (workingset) {
@@ -661,19 +437,197 @@ void swap_read_folio(struct swap_io_ctx *ctx, struct folio *folio)
delayacct_swapin_end();
}
-void swap_read_submit(struct swap_io_ctx *ctx)
+static void sio_write_end(struct swap_iocb *sio, bool failed)
+{
+ int p;
+
+ for (p = 0; p < sio->nr_vecs; p++) {
+ struct page *page = sio->bvec[p].bv_page;
+
+ if (failed) {
+ set_page_dirty(page);
+ ClearPageReclaim(page);
+ }
+ end_page_writeback(page);
+ }
+ mempool_free(sio, sio_pool);
+}
+
+static void sio_write_complete(struct kiocb *iocb, long ret)
+{
+ struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
+ bool failed = ret != sio->len;
+
+ if (failed) {
+ struct page *page = sio->bvec[0].bv_page;
+
+ /*
+ * In the case of swap-over-nfs, this can be a temporary failure
+ * if the system has limited memory for allocating transmit
+ * buffers. Mark the page dirty and avoid
+ * folio_rotate_reclaimable but rate-limit the messages.
+ */
+ pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
+ ret, swap_dev_pos(page_swap_entry(page)));
+ }
+
+ sio_write_end(sio, failed);
+}
+
+static void end_swap_bio_write(struct bio *bio)
+{
+ struct swap_iocb *sio = container_of(bio, struct swap_iocb, bio);
+ bool failed = !!bio->bi_status;
+
+ if (failed)
+ pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
+ MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
+ (unsigned long long)bio->bi_iter.bi_sector);
+ sio_write_end(sio, failed);
+}
+
+static void sio_read_end(struct swap_iocb *sio)
+{
+ int p;
+
+ for (p = 0; p < sio->nr_vecs; p++) {
+ struct folio *folio = page_folio(sio->bvec[p].bv_page);
+
+ count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
+ count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ }
+ count_vm_events(PSWPIN, sio->len >> PAGE_SHIFT);
+ mempool_free(sio, sio_pool);
+}
+
+static void sio_read_fail(struct swap_iocb *sio)
+{
+ int p;
+
+ for (p = 0; p < sio->nr_vecs; p++)
+ folio_unlock(page_folio(sio->bvec[p].bv_page));
+ mempool_free(sio, sio_pool);
+}
+
+static void sio_read_complete(struct kiocb *iocb, long ret)
+{
+ struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
+
+ if (ret != sio->len) {
+ pr_alert_ratelimited("Read-error on swap-device\n");
+ sio_read_fail(sio);
+ return;
+ }
+
+ sio_read_end(sio);
+}
+
+static void end_swap_bio_read(struct bio *bio)
+{
+ struct swap_iocb *sio = container_of(bio, struct swap_iocb, bio);
+
+ if (bio->bi_status) {
+ pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
+ MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
+ (unsigned long long)bio->bi_iter.bi_sector);
+ sio_read_fail(sio);
+ return;
+ }
+
+ sio_read_end(sio);
+}
+
+/* Build and submit a write bio over the sio's pre-filled bvec array. */
+static void swap_bdev_submit_write(struct swap_io_ctx *ctx)
+{
+	struct swap_iocb *sio = ctx->sio;
+	struct bio *bio = &sio->bio;
+
+	bio_init(bio, ctx->sis->bdev, sio->bvec, ARRAY_SIZE(sio->bvec),
+			REQ_OP_WRITE | REQ_SWAP);
+	/*
+	 * The bvec table is already populated; set bi_vcnt so that
+	 * completion-side iteration (bio_for_each_segment_all) works,
+	 * matching what bio_iov_bvec_set() does for pre-built tables.
+	 */
+	bio->bi_vcnt = sio->nr_vecs;
+	bio->bi_iter.bi_size = sio->len;
+	bio->bi_iter.bi_sector = swap_folio_sector(bio_first_folio_all(bio));
+	bio_associate_blkg_from_page(bio, bio_first_folio_all(bio));
+
+	if (ctx->sis->flags & SWP_SYNCHRONOUS_IO) {
+		submit_bio_wait(bio);
+		end_swap_bio_write(bio);
+	} else {
+		bio->bi_end_io = end_swap_bio_write;
+		submit_bio(bio);
+	}
+}
+
+/* Build and submit a read bio over the sio's pre-filled bvec array. */
+static void swap_bdev_submit_read(struct swap_io_ctx *ctx)
+{
+	struct swap_iocb *sio = ctx->sio;
+	struct bio *bio = &sio->bio;
+
+	bio_init(bio, ctx->sis->bdev, sio->bvec, ARRAY_SIZE(sio->bvec),
+			REQ_OP_READ);
+	/*
+	 * The bvec table is already populated; set bi_vcnt so that
+	 * completion-side iteration (bio_for_each_segment_all) works,
+	 * matching what bio_iov_bvec_set() does for pre-built tables.
+	 */
+	bio->bi_vcnt = sio->nr_vecs;
+	bio->bi_iter.bi_size = sio->len;
+	bio->bi_iter.bi_sector = swap_folio_sector(bio_first_folio_all(bio));
+
+	if (ctx->sis->flags & SWP_SYNCHRONOUS_IO) {
+		/*
+		 * Keep this task valid during swap readpage because the oom
+		 * killer may attempt to access it in the page fault retry
+		 * time check.
+		 */
+		get_task_struct(current);
+		submit_bio_wait(bio);
+		end_swap_bio_read(bio);
+		put_task_struct(current);
+	} else {
+		bio->bi_end_io = end_swap_bio_read;
+		submit_bio(bio);
+	}
+}
+
+static void swap_fs_submit(struct swap_io_ctx *ctx, int rw)
 {
-	struct iov_iter from;
 	struct swap_iocb *sio = ctx->sio;
-	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+	/*
+	 * Do not derive the mapping from sio->iocb: the kiocb shares a union
+	 * with the embedded bio and is not initialized until the
+	 * init_sync_kiocb() call below, so ki_filp is garbage here.
+	 */
+	struct address_space *mapping = ctx->sis->swap_file->f_mapping;
+	struct iov_iter iter;
 	int ret;
 
-	if (!sio)
-		return;
+	init_sync_kiocb(&sio->iocb, ctx->sis->swap_file);
+	sio->iocb.ki_pos = swap_dev_pos(page_folio(sio->bvec[0].bv_page)->swap);
+	if (rw == WRITE)
+		sio->iocb.ki_complete = sio_write_complete;
+	else
+		sio->iocb.ki_complete = sio_read_complete;
 
-	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
-	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+	iov_iter_bvec(&iter, rw == WRITE ? ITER_SOURCE : ITER_DEST,
+			sio->bvec, sio->nr_vecs, sio->len);
+	ret = mapping->a_ops->swap_rw(&sio->iocb, &iter);
 	if (ret != -EIOCBQUEUED)
-		sio_read_complete(&sio->iocb, ret);
+		sio->iocb.ki_complete(&sio->iocb, ret);
+}
+
+void swap_write_submit(struct swap_io_ctx *ctx)
+{
+ if (!ctx->sio)
+ return;
+
+ if (ctx->sis->flags & SWP_FS_OPS)
+ swap_fs_submit(ctx, WRITE);
+ else
+ swap_bdev_submit_write(ctx);
+ ctx->sio = NULL;
+ ctx->sis = NULL;
+}
+
+void swap_read_submit(struct swap_io_ctx *ctx)
+{
+ if (!ctx->sio)
+ return;
+
+ if (ctx->sis->flags & SWP_FS_OPS)
+ swap_fs_submit(ctx, READ);
+ else
+ swap_bdev_submit_read(ctx);
ctx->sio = NULL;
+ ctx->sis = NULL;
}
diff --git a/mm/swap.h b/mm/swap.h
index 3ec35b6d629f..b359735be3c5 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -55,6 +55,7 @@ enum swap_cluster_flags {
struct swap_io_ctx {
struct swap_iocb *sio;
+ struct swap_info_struct *sis;
};
#ifdef CONFIG_SWAP
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9174f1eeffb0..27dbce0d1e1e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2781,6 +2781,10 @@ static int setup_swap_extents(struct swap_info_struct *sis,
struct inode *inode = mapping->host;
int ret;
+ ret = sio_pool_init();
+ if (ret)
+ return ret;
+
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
@@ -2792,11 +2796,6 @@ static int setup_swap_extents(struct swap_info_struct *sis,
if (ret < 0)
return ret;
sis->flags |= SWP_ACTIVATED;
- if ((sis->flags & SWP_FS_OPS) &&
- sio_pool_init() != 0) {
- destroy_swap_extents(sis, swap_file);
- return -ENOMEM;
- }
return ret;
}
--
2.53.0
next prev parent reply other threads:[~2026-05-15 12:00 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-15 12:00 RFC: better block swap batching and a different take on swap_ops Christoph Hellwig
2026-05-15 12:00 ` [PATCH 1/6] shmem: provide a shmem_write_folio wrapper Christoph Hellwig
2026-05-15 12:00 ` [PATCH 2/6] mm: merge writeout into pageout Christoph Hellwig
2026-05-15 12:00 ` [PATCH 3/6] mm/swap: intoduce struct swap_io_ctx Christoph Hellwig
2026-05-15 12:00 ` Christoph Hellwig [this message]
2026-05-15 12:00 ` [PATCH 5/6] mm/swap: use swap_ops to register swap device's methods Christoph Hellwig
2026-05-15 12:00 ` [PATCH 6/6] mm/swap: remove SWP_FS_OPS Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260515120019.4015143-5-hch@lst.de \
--to=hch@lst.de \
--cc=akpm@linux-foundation.org \
--cc=baoquan.he@linux.dev \
--cc=chrisl@kernel.org \
--cc=kasong@tencent.com \
--cc=linux-mm@kvack.org \
--cc=nphamcs@gmail.com \
--cc=shikemeng@huaweicloud.com \
--cc=usama.arif@linux.dev \
--cc=youngjun.park@lge.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox