* [PATCH] iomap: add allocation cache for iomap_dio
From: guzebing @ 2025-11-21 9:00 UTC
To: brauner, djwong
Cc: linux-xfs, linux-fsdevel, linux-kernel, guzebing, Fengnan Chang
From: guzebing <guzebing@bytedance.com>
Following the approach already used for the bio structure, add a
per-cpu cache for iomap_dio allocations so that completed structures
can be recycled quickly instead of going back through the slab
allocator.
This reduces memory allocations on the direct IO path, so direct IO is
less likely to block when system memory is tight. It also improves
io_uring direct read performance by about 2.6%.
Suggested-by: Fengnan Chang <changfengnan@bytedance.com>
Signed-off-by: guzebing <guzebing@bytedance.com>
---
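Illustrative only: the exact benchmark configuration is not given in
this message, but an io_uring direct read test of this kind is
typically driven with an fio job along these lines (device name is just
an example):

	fio --name=dio-randread --filename=/dev/nvme0n1 --ioengine=io_uring \
	    --direct=1 --rw=randread --bs=4k --iodepth=64 --numjobs=1 \
	    --time_based --runtime=60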
fs/iomap/direct-io.c | 92 +++++++++++++++++++++++++++++++++++++++++---
1 file changed, 87 insertions(+), 5 deletions(-)
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 5d5d63efbd57..7a5c610ded7b 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -54,8 +54,84 @@ struct iomap_dio {
struct work_struct work;
} aio;
};
+ struct iomap_dio *dio_next; /* per-cpu free list link */
};
+#define DIO_ALLOC_CACHE_THRESHOLD 16
+#define DIO_ALLOC_CACHE_MAX 256
+struct dio_alloc_cache {
+ struct iomap_dio *free_list;
+ struct iomap_dio *free_list_irq;
+ int nr;
+ int nr_irq;
+};
+
+static struct dio_alloc_cache __percpu *dio_cache;
+
+static void dio_alloc_irq_cache_splice(struct dio_alloc_cache *cache)
+{
+ unsigned long flags;
+
+ /* cache->free_list must be empty */
+ if (WARN_ON_ONCE(cache->free_list))
+ return;
+
+ local_irq_save(flags);
+ cache->free_list = cache->free_list_irq;
+ cache->free_list_irq = NULL;
+ cache->nr += cache->nr_irq;
+ cache->nr_irq = 0;
+ local_irq_restore(flags);
+}
+
+static struct iomap_dio *dio_alloc_percpu_cache(void)
+{
+ struct dio_alloc_cache *cache;
+ struct iomap_dio *dio;
+
+ cache = per_cpu_ptr(dio_cache, get_cpu());
+ if (!cache->free_list) {
+ if (READ_ONCE(cache->nr_irq) >= DIO_ALLOC_CACHE_THRESHOLD)
+ dio_alloc_irq_cache_splice(cache);
+ if (!cache->free_list) {
+ put_cpu();
+ return NULL;
+ }
+ }
+ dio = cache->free_list;
+ cache->free_list = dio->dio_next;
+ cache->nr--;
+ put_cpu();
+ return dio;
+}
+
+static void dio_put_percpu_cache(struct iomap_dio *dio)
+{
+ struct dio_alloc_cache *cache;
+
+ cache = per_cpu_ptr(dio_cache, get_cpu());
+ if (READ_ONCE(cache->nr_irq) + cache->nr > DIO_ALLOC_CACHE_MAX)
+ goto out_free;
+
+ if (in_task()) {
+ dio->dio_next = cache->free_list;
+ cache->free_list = dio;
+ cache->nr++;
+ } else if (in_hardirq()) {
+ lockdep_assert_irqs_disabled();
+ dio->dio_next = cache->free_list_irq;
+ cache->free_list_irq = dio;
+ cache->nr_irq++;
+ } else {
+ goto out_free;
+ }
+ put_cpu();
+ return;
+out_free:
+ put_cpu();
+ kfree(dio);
+}
+
static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
@@ -135,7 +211,7 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
ret += dio->done_before;
}
trace_iomap_dio_complete(iocb, dio->error, ret);
- kfree(dio);
+ dio_put_percpu_cache(dio);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
@@ -620,9 +696,12 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!iomi.len)
return NULL;
- dio = kmalloc(sizeof(*dio), GFP_KERNEL);
- if (!dio)
- return ERR_PTR(-ENOMEM);
+ dio = dio_alloc_percpu_cache();
+ if (!dio) {
+ dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+ if (!dio)
+ return ERR_PTR(-ENOMEM);
+ }
dio->iocb = iocb;
atomic_set(&dio->ref, 1);
@@ -804,7 +883,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
return dio;
out_free_dio:
- kfree(dio);
+ dio_put_percpu_cache(dio);
if (ret)
return ERR_PTR(ret);
return NULL;
@@ -833,6 +912,9 @@ static int __init iomap_dio_init(void)
if (!zero_page)
return -ENOMEM;
+ dio_cache = alloc_percpu(struct dio_alloc_cache);
+ if (!dio_cache)
+ return -ENOMEM;
return 0;
}
--
2.20.1
* Re: [PATCH] iomap: add allocation cache for iomap_dio
From: Christoph Hellwig @ 2025-11-21 10:22 UTC
To: guzebing
Cc: brauner, djwong, linux-xfs, linux-fsdevel, linux-kernel, guzebing,
Fengnan Chang
On Fri, Nov 21, 2025 at 05:00:52PM +0800, guzebing wrote:
> From: guzebing <guzebing@bytedance.com>
>
> Following the approach already used for the bio structure, add a
> per-cpu cache for iomap_dio allocations so that completed structures
> can be recycled quickly instead of going back through the slab
> allocator.
>
> This reduces memory allocations on the direct IO path, so direct IO is
> less likely to block when system memory is tight. It also improves
> io_uring direct read performance by about 2.6%.
Have you checked how much of that gain you'd get just by using a
dedicated slab cache, which already does per-cpu allocations
internally?  Note that even if we keep a per-cpu cache like this we'd
probably still want a dedicated slab cache backing it.
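Something along these lines (untested sketch; the iomap_dio_cachep name
is made up for illustration) would replace the kmalloc/kfree pair:

	static struct kmem_cache *iomap_dio_cachep;

	static int __init iomap_dio_init(void)
	{
		...
		/* dedicated cache so struct iomap_dio gets its own slabs */
		iomap_dio_cachep = kmem_cache_create("iomap_dio",
				sizeof(struct iomap_dio), 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!iomap_dio_cachep)
			return -ENOMEM;
		return 0;
	}

	/* at the allocation and completion sites */
	dio = kmem_cache_alloc(iomap_dio_cachep, GFP_KERNEL);
	...
	kmem_cache_free(iomap_dio_cachep, dio);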
Also any chance you could factor this into common code?
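The per-cpu free-list logic here is not really specific to iomap_dio.
A rough sketch of a shared helper (all names below are made up, and the
hardirq side-list is dropped for brevity) could look like:

	/* free objects are linked through their first word */
	struct pcpu_obj_cache {
		void	*free_list;
		int	nr;
	};

	static inline void *pcpu_obj_cache_get(struct pcpu_obj_cache __percpu *pc)
	{
		struct pcpu_obj_cache *cache = per_cpu_ptr(pc, get_cpu());
		void *obj = cache->free_list;

		if (obj) {
			cache->free_list = *(void **)obj;
			cache->nr--;
		}
		put_cpu();
		return obj;
	}

	/* returns false if the object was not cached; the caller then frees it */
	static inline bool pcpu_obj_cache_put(struct pcpu_obj_cache __percpu *pc,
			void *obj, int max)
	{
		struct pcpu_obj_cache *cache = per_cpu_ptr(pc, get_cpu());
		bool cached = false;

		if (in_task() && cache->nr < max) {
			*(void **)obj = cache->free_list;
			cache->free_list = obj;
			cache->nr++;
			cached = true;
		}
		put_cpu();
		return cached;
	}

Users would then only need to provide the per-cpu allocation and a
size limit, and fall back to their own allocator when the cache is
empty or full.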