* [PATCH] ext4: optimize ext4_end_io memory usage
@ 2013-04-01 5:17 Dmitry Monakhov
2013-04-05 9:35 ` Jan Kara
0 siblings, 1 reply; 4+ messages in thread
From: Dmitry Monakhov @ 2013-04-01 5:17 UTC (permalink / raw)
To: linux-ext4; +Cc: Dmitry Monakhov
ext4_end_io->pages array is used only for buffered writes and is useless
in case of DIO. This patch allows us to save 1K for each DIO request.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/ext4/ext4.h | 4 ++--
fs/ext4/inode.c | 3 +--
fs/ext4/page-io.c | 39 ++++++++++++++++++++++++++++++++-------
3 files changed, 35 insertions(+), 11 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 73f3e60..4c803af 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -219,7 +219,7 @@ typedef struct ext4_io_end {
struct kiocb *iocb; /* iocb struct for AIO */
int result; /* error value for AIO */
int num_io_pages; /* for writepages() */
- struct ext4_io_page *pages[MAX_IO_PAGES]; /* for writepages() */
+ struct ext4_io_page **pages; /* for writepages() */
} ext4_io_end_t;
struct ext4_io_submit {
@@ -2622,7 +2622,7 @@ extern void ext4_add_complete_io(ext4_io_end_t *io_end);
extern void ext4_exit_pageio(void);
extern void ext4_ioend_shutdown(struct inode *);
extern void ext4_free_io_end(ext4_io_end_t *io);
-extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
+extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, int directio, gfp_t flags);
extern void ext4_end_io_work(struct work_struct *work);
extern void ext4_io_submit(struct ext4_io_submit *io);
extern int ext4_bio_write_page(struct ext4_io_submit *io,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f455ac0..840a23e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3162,12 +3162,11 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
iocb->private = NULL;
ext4_inode_aio_set(inode, NULL);
if (!is_sync_kiocb(iocb)) {
- ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
+ ext4_io_end_t *io_end = ext4_init_io_end(inode, 1, GFP_NOFS);
if (!io_end) {
ret = -ENOMEM;
goto retake_lock;
}
- io_end->flag |= EXT4_IO_END_DIRECT;
iocb->private = io_end;
/*
* we save the io structure for current async direct
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 047a6de..1b8ec50 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -29,16 +29,26 @@
#include "xattr.h"
#include "acl.h"
-static struct kmem_cache *io_page_cachep, *io_end_cachep;
+static struct kmem_cache *io_page_cachep, *io_pgvec_cachep, *io_end_cachep;
int __init ext4_init_pageio(void)
{
io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
if (io_page_cachep == NULL)
return -ENOMEM;
+
+ io_pgvec_cachep = kmem_cache_create("ext4_io_pgvec",
+ sizeof(struct ext4_io_page*)
+ * MAX_IO_PAGES,
+ 0, (SLAB_RECLAIM_ACCOUNT), NULL);
+ if (io_pgvec_cachep == NULL) {
+ kmem_cache_destroy(io_page_cachep);
+ return -ENOMEM;
+ }
io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
if (io_end_cachep == NULL) {
kmem_cache_destroy(io_page_cachep);
+ kmem_cache_destroy(io_pgvec_cachep);
return -ENOMEM;
}
return 0;
@@ -47,6 +57,7 @@ int __init ext4_init_pageio(void)
void ext4_exit_pageio(void)
{
kmem_cache_destroy(io_end_cachep);
+ kmem_cache_destroy(io_pgvec_cachep);
kmem_cache_destroy(io_page_cachep);
}
@@ -83,12 +94,15 @@ void ext4_free_io_end(ext4_io_end_t *io)
BUG_ON(!io);
BUG_ON(!list_empty(&io->list));
BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
+ BUG_ON(io->num_io_pages && !io->pages);
for (i = 0; i < io->num_io_pages; i++)
put_io_page(io->pages[i]);
io->num_io_pages = 0;
if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
wake_up_all(ext4_ioend_wq(io->inode));
+ if (io->pages)
+ kmem_cache_free(io_pgvec_cachep, io->pages);
kmem_cache_free(io_end_cachep, io);
}
@@ -212,14 +226,25 @@ int ext4_flush_unwritten_io(struct inode *inode)
return ret;
}
-ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
+ext4_io_end_t *ext4_init_io_end(struct inode *inode, int directio, gfp_t flags)
{
ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
- if (io) {
- atomic_inc(&EXT4_I(inode)->i_ioend_count);
- io->inode = inode;
- INIT_LIST_HEAD(&io->list);
+
+ if (!io)
+ return NULL;
+
+ if (directio) {
+ io->flag = EXT4_IO_END_DIRECT;
+ } else {
+ io->pages = kmem_cache_zalloc(io_pgvec_cachep, flags);
+ if (!io->pages) {
+ kmem_cache_free(io_end_cachep, io);
+ return NULL;
+ }
}
+ atomic_inc(&EXT4_I(inode)->i_ioend_count);
+ io->inode = inode;
+ INIT_LIST_HEAD(&io->list);
return io;
}
@@ -327,7 +352,7 @@ static int io_submit_init(struct ext4_io_submit *io,
int nvecs = bio_get_nr_vecs(bh->b_bdev);
struct bio *bio;
- io_end = ext4_init_io_end(inode, GFP_NOFS);
+ io_end = ext4_init_io_end(inode, 0, GFP_NOFS);
if (!io_end)
return -ENOMEM;
bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
--
1.7.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] ext4: optimize ext4_end_io memory usage
2013-04-01 5:17 [PATCH] ext4: optimize ext4_end_io memory usage Dmitry Monakhov
@ 2013-04-05 9:35 ` Jan Kara
2013-04-08 17:10 ` Theodore Ts'o
0 siblings, 1 reply; 4+ messages in thread
From: Jan Kara @ 2013-04-05 9:35 UTC (permalink / raw)
To: Dmitry Monakhov; +Cc: linux-ext4
On Mon 01-04-13 09:17:48, Dmitry Monakhov wrote:
> ext4_end_io->pages array has is used only for buffered writes and usless
> in case of DIO. This patch allow us to save 1K for each DIO request.
I have actually a more complete solution to this in my patch queue -
there isn't any need to reference pages from io_end after my patches.
Hopefully I'll be able to send things out later today...
Honza
> Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
> ---
> fs/ext4/ext4.h | 4 ++--
> fs/ext4/inode.c | 3 +--
> fs/ext4/page-io.c | 39 ++++++++++++++++++++++++++++++++-------
> 3 files changed, 35 insertions(+), 11 deletions(-)
>
> diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
> index 73f3e60..4c803af 100644
> --- a/fs/ext4/ext4.h
> +++ b/fs/ext4/ext4.h
> @@ -219,7 +219,7 @@ typedef struct ext4_io_end {
> struct kiocb *iocb; /* iocb struct for AIO */
> int result; /* error value for AIO */
> int num_io_pages; /* for writepages() */
> - struct ext4_io_page *pages[MAX_IO_PAGES]; /* for writepages() */
> + struct ext4_io_page **pages; /* for writepages() */
> } ext4_io_end_t;
>
> struct ext4_io_submit {
> @@ -2622,7 +2622,7 @@ extern void ext4_add_complete_io(ext4_io_end_t *io_end);
> extern void ext4_exit_pageio(void);
> extern void ext4_ioend_shutdown(struct inode *);
> extern void ext4_free_io_end(ext4_io_end_t *io);
> -extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
> +extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, int directio, gfp_t flags);
> extern void ext4_end_io_work(struct work_struct *work);
> extern void ext4_io_submit(struct ext4_io_submit *io);
> extern int ext4_bio_write_page(struct ext4_io_submit *io,
> diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
> index f455ac0..840a23e 100644
> --- a/fs/ext4/inode.c
> +++ b/fs/ext4/inode.c
> @@ -3162,12 +3162,11 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
> iocb->private = NULL;
> ext4_inode_aio_set(inode, NULL);
> if (!is_sync_kiocb(iocb)) {
> - ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
> + ext4_io_end_t *io_end = ext4_init_io_end(inode, 1, GFP_NOFS);
> if (!io_end) {
> ret = -ENOMEM;
> goto retake_lock;
> }
> - io_end->flag |= EXT4_IO_END_DIRECT;
> iocb->private = io_end;
> /*
> * we save the io structure for current async direct
> diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
> index 047a6de..1b8ec50 100644
> --- a/fs/ext4/page-io.c
> +++ b/fs/ext4/page-io.c
> @@ -29,16 +29,26 @@
> #include "xattr.h"
> #include "acl.h"
>
> -static struct kmem_cache *io_page_cachep, *io_end_cachep;
> +static struct kmem_cache *io_page_cachep, *io_pgvec_cachep, *io_end_cachep;
>
> int __init ext4_init_pageio(void)
> {
> io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
> if (io_page_cachep == NULL)
> return -ENOMEM;
> +
> + io_pgvec_cachep = kmem_cache_create("ext4_io_pgvec",
> + sizeof(struct ext4_io_page*)
> + * MAX_IO_PAGES,
> + 0, (SLAB_RECLAIM_ACCOUNT), NULL);
> + if (io_pgvec_cachep == NULL) {
> + kmem_cache_destroy(io_page_cachep);
> + return -ENOMEM;
> + }
> io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
> if (io_end_cachep == NULL) {
> kmem_cache_destroy(io_page_cachep);
> + kmem_cache_destroy(io_pgvec_cachep);
> return -ENOMEM;
> }
> return 0;
> @@ -47,6 +57,7 @@ int __init ext4_init_pageio(void)
> void ext4_exit_pageio(void)
> {
> kmem_cache_destroy(io_end_cachep);
> + kmem_cache_destroy(io_pgvec_cachep);
> kmem_cache_destroy(io_page_cachep);
> }
>
> @@ -83,12 +94,15 @@ void ext4_free_io_end(ext4_io_end_t *io)
> BUG_ON(!io);
> BUG_ON(!list_empty(&io->list));
> BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
> + BUG_ON(io->num_io_pages && !io->pages);
>
> for (i = 0; i < io->num_io_pages; i++)
> put_io_page(io->pages[i]);
> io->num_io_pages = 0;
> if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
> wake_up_all(ext4_ioend_wq(io->inode));
> + if (io->pages)
> + kmem_cache_free(io_pgvec_cachep, io->pages);
> kmem_cache_free(io_end_cachep, io);
> }
>
> @@ -212,14 +226,25 @@ int ext4_flush_unwritten_io(struct inode *inode)
> return ret;
> }
>
> -ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
> +ext4_io_end_t *ext4_init_io_end(struct inode *inode, int directio, gfp_t flags)
> {
> ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
> - if (io) {
> - atomic_inc(&EXT4_I(inode)->i_ioend_count);
> - io->inode = inode;
> - INIT_LIST_HEAD(&io->list);
> +
> + if (!io)
> + return NULL;
> +
> + if (directio) {
> + io->flag = EXT4_IO_END_DIRECT;
> + } else {
> + io->pages = kmem_cache_zalloc(io_pgvec_cachep, flags);
> + if (!io->pages) {
> + kmem_cache_free(io_end_cachep, io);
> + return NULL;
> + }
> }
> + atomic_inc(&EXT4_I(inode)->i_ioend_count);
> + io->inode = inode;
> + INIT_LIST_HEAD(&io->list);
> return io;
> }
>
> @@ -327,7 +352,7 @@ static int io_submit_init(struct ext4_io_submit *io,
> int nvecs = bio_get_nr_vecs(bh->b_bdev);
> struct bio *bio;
>
> - io_end = ext4_init_io_end(inode, GFP_NOFS);
> + io_end = ext4_init_io_end(inode, 0, GFP_NOFS);
> if (!io_end)
> return -ENOMEM;
> bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
> --
> 1.7.1
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
--
Jan Kara <jack@suse.cz>
SUSE Labs, CR
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] ext4: optimize ext4_end_io memory usage
2013-04-05 9:35 ` Jan Kara
@ 2013-04-08 17:10 ` Theodore Ts'o
2013-04-08 21:38 ` Jan Kara
0 siblings, 1 reply; 4+ messages in thread
From: Theodore Ts'o @ 2013-04-08 17:10 UTC (permalink / raw)
To: Jan Kara; +Cc: Dmitry Monakhov, linux-ext4
On Fri, Apr 05, 2013 at 11:35:14AM +0200, Jan Kara wrote:
> On Mon 01-04-13 09:17:48, Dmitry Monakhov wrote:
> > ext4_end_io->pages array has is used only for buffered writes and usless
> > in case of DIO. This patch allow us to save 1K for each DIO request.
> I have actually a more complete solution to this in my patch queue -
> there isn't any need to reference pages from io_end after my patches.
> Hopefully I'll be able to send things out later today...
Hi Jan,
Did you have a chance to send out your patches?
Thanks,
- Ted
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] ext4: optimize ext4_end_io memory usage
2013-04-08 17:10 ` Theodore Ts'o
@ 2013-04-08 21:38 ` Jan Kara
0 siblings, 0 replies; 4+ messages in thread
From: Jan Kara @ 2013-04-08 21:38 UTC (permalink / raw)
To: Theodore Ts'o; +Cc: Jan Kara, Dmitry Monakhov, linux-ext4
On Mon 08-04-13 13:10:52, Ted Tso wrote:
> On Fri, Apr 05, 2013 at 11:35:14AM +0200, Jan Kara wrote:
> > On Mon 01-04-13 09:17:48, Dmitry Monakhov wrote:
> > > ext4_end_io->pages array has is used only for buffered writes and usless
> > > in case of DIO. This patch allow us to save 1K for each DIO request.
> > I have actually a more complete solution to this in my patch queue -
> > there isn't any need to reference pages from io_end after my patches.
> > Hopefully I'll be able to send things out later today...
>
> Hi Jan,
>
> Did you have a chance to send out your patches?
Not on Friday but the patch bomb has landed just now (finally I was able
to sort out all the failures in dioread_nolock mode). I'd just note that
my version of Dmitry's patch is patch 1/29.
Honza
--
Jan Kara <jack@suse.cz>
SUSE Labs, CR
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2013-04-08 21:38 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-04-01 5:17 [PATCH] ext4: optimize ext4_end_io memory usage Dmitry Monakhov
2013-04-05 9:35 ` Jan Kara
2013-04-08 17:10 ` Theodore Ts'o
2013-04-08 21:38 ` Jan Kara
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).