cluster-devel.redhat.com archive mirror
* [Cluster-devel] GFS2: Cache last hash bucket for glock seq_files
@ 2012-06-08 10:52 Steven Whitehouse
       [not found] ` <1339230731.6001.159.camel@edumazet-glaptop>
  0 siblings, 1 reply; 6+ messages in thread
From: Steven Whitehouse @ 2012-06-08 10:52 UTC (permalink / raw)
  To: cluster-devel.redhat.com

From ba1ddcb6ca0c46edd275790d1e4e2cfd6219ce19 Mon Sep 17 00:00:00 2001
From: Steven Whitehouse <swhiteho@redhat.com>
Date: Fri, 8 Jun 2012 11:16:22 +0100
Subject: [PATCH] GFS2: Cache last hash bucket for glock seq_files

For the glocks and glstats seq_files, which are exposed via debugfs,
we should cache the most recent hash bucket, along with the offset
into that bucket. This allows us to restart from that point, rather
than having to begin at the beginning each time.

This is an idea from Eric Dumazet; however, I've slightly extended it
so that if the position from which we are due to start is at any
point beyond the last cached point, we start from the last cached
point plus the appropriate offset. I don't really expect people to be
lseeking around these files, but if they did so with only positive
offsets, then we'd still get some of the benefit of using a cached
offset.

With my simple test of around 200k entries in the file, I'm seeing
an approx 10x speed up.
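
(As an illustration of the consumer side - a rough sketch only, with a
hypothetical filesystem name and the usual debugfs mount point assumed -
the winner here is a sequential reader of the glocks file, since every
read() call restarts the seq_file iterator at *pos:)

/* Sketch: sequential reader of the glocks debugfs file.
 * "myfs" and the debugfs mount point below are assumptions;
 * adjust to the local setup.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/gfs2/myfs/glocks", "r");
	char buf[8192];
	size_t n, total = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Each underlying read() re-enters ->start() at the saved
	 * position; with the cached bucket/offset the restart no
	 * longer rescans from bucket 0. */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		total += n;
	fclose(f);
	printf("read %zu bytes\n", total);
	return 0;
}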

Cc: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1c4cddf..3ad8cb3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -46,10 +46,12 @@
 #include "trace_gfs2.h"
 
 struct gfs2_glock_iter {
-	int hash;			/* hash bucket index         */
-	struct gfs2_sbd *sdp;		/* incore superblock         */
-	struct gfs2_glock *gl;		/* current glock struct      */
-	char string[512];		/* scratch space             */
+	int hash;			/* hash bucket index           */
+	unsigned nhash;			/* Index within current bucket */
+	struct gfs2_sbd *sdp;		/* incore superblock           */
+	struct gfs2_glock *gl;		/* current glock struct        */
+	loff_t last_pos;		/* last position               */
+	char string[512];		/* scratch space               */
 };
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
@@ -950,7 +952,7 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 	if (seq) {
 		struct gfs2_glock_iter *gi = seq->private;
 		vsprintf(gi->string, fmt, args);
-		seq_printf(seq, gi->string);
+		seq_puts(seq, gi->string);
 	} else {
 		vaf.fmt = fmt;
 		vaf.va = &args;
@@ -1854,8 +1856,14 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 		gl = gi->gl;
 		if (gl) {
 			gi->gl = glock_hash_next(gl);
+			gi->nhash++;
 		} else {
+			if (gi->hash >= GFS2_GL_HASH_SIZE) {
+				rcu_read_unlock();
+				return 1;
+			}
 			gi->gl = glock_hash_chain(gi->hash);
+			gi->nhash = 0;
 		}
 		while (gi->gl == NULL) {
 			gi->hash++;
@@ -1864,6 +1872,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 				return 1;
 			}
 			gi->gl = glock_hash_chain(gi->hash);
+			gi->nhash = 0;
 		}
 	/* Skip entries for other sb and dead entries */
 	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
@@ -1876,7 +1885,12 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 	struct gfs2_glock_iter *gi = seq->private;
 	loff_t n = *pos;
 
-	gi->hash = 0;
+	if (gi->last_pos <= *pos)
+		n = gi->nhash + (*pos - gi->last_pos);
+	else
+		gi->hash = 0;
+
+	gi->nhash = 0;
 	rcu_read_lock();
 
 	do {
@@ -1884,6 +1898,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 			return NULL;
 	} while (n--);
 
+	gi->last_pos = *pos;
 	return gi->gl;
 }
 
@@ -1893,7 +1908,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
 	struct gfs2_glock_iter *gi = seq->private;
 
 	(*pos)++;
-
+	gi->last_pos = *pos;
 	if (gfs2_glock_iter_next(gi))
 		return NULL;
 
-- 
1.7.4






* [Cluster-devel] GFS2: Cache last hash bucket for glock seq_files
       [not found] ` <1339230731.6001.159.camel@edumazet-glaptop>
@ 2012-06-11  8:29   ` Steven Whitehouse
       [not found]     ` <1339404517.6001.1767.camel@edumazet-glaptop>
  0 siblings, 1 reply; 6+ messages in thread
From: Steven Whitehouse @ 2012-06-11  8:29 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

On Sat, 2012-06-09 at 10:32 +0200, Eric Dumazet wrote:
> On Fri, 2012-06-08 at 11:52 +0100, Steven Whitehouse wrote:
> > From ba1ddcb6ca0c46edd275790d1e4e2cfd6219ce19 Mon Sep 17 00:00:00 2001
> > From: Steven Whitehouse <swhiteho@redhat.com>
> > Date: Fri, 8 Jun 2012 11:16:22 +0100
> > Subject: [PATCH] GFS2: Cache last hash bucket for glock seq_files
> > 
> > For the glocks and glstats seq_files, which are exposed via debugfs,
> > we should cache the most recent hash bucket, along with the offset
> > into that bucket. This allows us to restart from that point, rather
> > than having to begin at the beginning each time.
> > 
> > This is an idea from Eric Dumazet; however, I've slightly extended it
> > so that if the position from which we are due to start is at any
> > point beyond the last cached point, we start from the last cached
> > point plus the appropriate offset. I don't really expect people to be
> > lseeking around these files, but if they did so with only positive
> > offsets, then we'd still get some of the benefit of using a cached
> > offset.
> > 
> > With my simple test of around 200k entries in the file, I'm seeing
> > an approx 10x speed up.
> 
> Strange, a 10x speed up is not what I would expect...
> 
That is on top of the almost 8x from increasing the buffer size with a
patch that Al Viro had suggested as a response to our earlier
discussion:

http://git.kernel.org/?p=linux/kernel/git/steve/gfs2-3.0-nmw.git;a=commitdiff;h=df5d2f5560a9c33129391a136ed9f0ac26abe69b

Also, I suspect that I'd see a much larger effect if I used more glocks
(i.e. more entries in the file). It is not unusual to see a million or
more entries, but it does mean that tests take a long time, just to
create or cache all those entries. So I was testing with a smaller
number mainly to speed up the tests.

> > 
> > Cc: Eric Dumazet <eric.dumazet@gmail.com>
> > Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
> > 
> > diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
> > index 1c4cddf..3ad8cb3 100644
> > --- a/fs/gfs2/glock.c
> > +++ b/fs/gfs2/glock.c
> > @@ -46,10 +46,12 @@
> >  #include "trace_gfs2.h"
> >  
> >  struct gfs2_glock_iter {
> > -	int hash;			/* hash bucket index         */
> > -	struct gfs2_sbd *sdp;		/* incore superblock         */
> > -	struct gfs2_glock *gl;		/* current glock struct      */
> > -	char string[512];		/* scratch space             */
> > +	int hash;			/* hash bucket index           */
> > +	unsigned nhash;			/* Index within current bucket */
> > +	struct gfs2_sbd *sdp;		/* incore superblock           */
> > +	struct gfs2_glock *gl;		/* current glock struct        */
> > +	loff_t last_pos;		/* last position               */
> > +	char string[512];		/* scratch space               */
> >  };
> >  
> >  typedef void (*glock_examiner) (struct gfs2_glock * gl);
> > @@ -950,7 +952,7 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
> >  	if (seq) {
> >  		struct gfs2_glock_iter *gi = seq->private;
> >  		vsprintf(gi->string, fmt, args);
> > -		seq_printf(seq, gi->string);
> > +		seq_puts(seq, gi->string);
> 
> This looks like a bug fix on its own ?
> 
> Anyway, the vsprintf(gi->string, ...) sounds risky too, vsnprintf() is
> your friend.
> 
There are no strings (except fixed-length ones) which will be printed,
so all the fields are of bounded length and the format is also fixed.
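
(For reference, the bounded variant would amount to a one-line change,
roughly as in this sketch - illustration only:)

	/* Sketch: bound the write into the scratch buffer.
	 * vsnprintf() writes at most sizeof(gi->string) bytes,
	 * including the terminating NUL, so an over-long line
	 * cannot overrun the buffer. */
	vsnprintf(gi->string, sizeof(gi->string), fmt, args);
	seq_puts(seq, gi->string);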

> I suggest you add seq_vprintf() interface to get rid of the string[512]
> scratch/kludge and remove one copy...
> 
That sounds like a good idea... this bit of code is quite old, and
seq_vprintf was not available originally (neither was seq_puts, or I
suspect I'd have used that at the time), so I'll do a follow-up patch
to resolve that,

Steve.





* [Cluster-devel] [PATCH] Add seq_vprintf and use in gfs2 (was Re: GFS2: Cache last hash bucket for glock seq_files)
       [not found]     ` <1339404517.6001.1767.camel@edumazet-glaptop>
@ 2012-06-11 10:21       ` Steven Whitehouse
  2012-06-11 12:56         ` Steven Whitehouse
                           ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Steven Whitehouse @ 2012-06-11 10:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

On Mon, 2012-06-11 at 10:48 +0200, Eric Dumazet wrote:
> On Mon, 2012-06-11 at 09:29 +0100, Steven Whitehouse wrote:
> > > 
> > That is on top of the almost 8x from increasing the buffer size with a
> > patch that Al Viro had suggested as a response to our earlier
> > discussion:
> 
> Ah OK, I understand ;)
> 
> > 
> > http://git.kernel.org/?p=linux/kernel/git/steve/gfs2-3.0-nmw.git;a=commitdiff;h=df5d2f5560a9c33129391a136ed9f0ac26abe69b
> 
> 
> Hmm, this patch seems overkill if PAGE_SIZE=65536 ?
> 
> #define GFS2_SEQ_GOODSIZE   \
> 	min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536)
> 
> 
That's true, but are there any arches with a 64k page size? In any
case, I'll follow up with another patch for that rather than try to
combine it with this one...

Here is a patch to add seq_vprintf and make use of it. It does speed
things up a bit, but not hugely (from approx 1.3 to approx 1.2 seconds
for my test). I can split this up into two bits if required, but let's
see what Al thinks is the best way to apply this. I'm happy to keep it
in the GFS2 tree if there are no objections - that should reduce
conflicts and complications, I hope.

Copying in lkml again, since this has ventured back into generic code
once more.

This patch adds a seq_vprintf function and then uses it in the GFS2
code in order to remove the need for a temporary buffer in the
glock/glstats file iteration state.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 10ae164..4d5d63d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -51,7 +51,6 @@ struct gfs2_glock_iter {
 	struct gfs2_sbd *sdp;		/* incore superblock           */
 	struct gfs2_glock *gl;		/* current glock struct        */
 	loff_t last_pos;		/* last position               */
-	char string[512];		/* scratch space               */
 };
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
@@ -951,9 +950,7 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 	va_start(args, fmt);
 
 	if (seq) {
-		struct gfs2_glock_iter *gi = seq->private;
-		vsprintf(gi->string, fmt, args);
-		seq_puts(seq, gi->string);
+		seq_vprintf(seq, fmt, args);
 	} else {
 		vaf.fmt = fmt;
 		vaf.va = &args;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 0cbd049..14cf9de 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -385,15 +385,12 @@ int seq_escape(struct seq_file *m, const char *s, const char *esc)
 }
 EXPORT_SYMBOL(seq_escape);
 
-int seq_printf(struct seq_file *m, const char *f, ...)
+int seq_vprintf(struct seq_file *m, const char *f, va_list args)
 {
-	va_list args;
 	int len;
 
 	if (m->count < m->size) {
-		va_start(args, f);
 		len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
-		va_end(args);
 		if (m->count + len < m->size) {
 			m->count += len;
 			return 0;
@@ -402,6 +399,19 @@ int seq_printf(struct seq_file *m, const char *f, ...)
 	seq_set_overflow(m);
 	return -1;
 }
+EXPORT_SYMBOL(seq_vprintf);
+
+int seq_printf(struct seq_file *m, const char *f, ...)
+{
+	int ret;
+	va_list args;
+
+	va_start(args, f);
+	ret = seq_vprintf(m, f, args);
+	va_end(args);
+
+	return ret;
+}
 EXPORT_SYMBOL(seq_printf);
 
 /**
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index fc61854..83c44ee 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -86,6 +86,7 @@ int seq_puts(struct seq_file *m, const char *s);
 int seq_write(struct seq_file *seq, const void *data, size_t len);
 
 __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
+__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
 
 int seq_path(struct seq_file *, const struct path *, const char *);
 int seq_dentry(struct seq_file *, struct dentry *, const char *);





* [Cluster-devel] [PATCH] Add seq_vprintf and use in gfs2 (was Re: GFS2: Cache last hash bucket for glock seq_files)
  2012-06-11 10:21       ` [Cluster-devel] [PATCH] Add seq_vprintf and use in gfs2 (was Re: GFS2: Cache last hash bucket for glock seq_files) Steven Whitehouse
@ 2012-06-11 12:56         ` Steven Whitehouse
  2012-06-11 12:56         ` Steven Whitehouse
       [not found]         ` <1339413164.6001.2046.camel@edumazet-glaptop>
  2 siblings, 0 replies; 6+ messages in thread
From: Steven Whitehouse @ 2012-06-11 12:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

I've split that original patch into two, and I'll carry both bits in
the GFS2 -nmw tree for now. Here are the (hopefully) final versions of
the two patches. I'll follow up with a buffer-sizing patch a bit later
too,

Steve.

-------------------------------------------------------------------------------
From a4808147dcf1ecf2f76212a78fd9692b3c112f47 Mon Sep 17 00:00:00 2001
From: Steven Whitehouse <swhiteho@redhat.com>
Date: Mon, 11 Jun 2012 13:16:35 +0100
Subject: [PATCH 1/2] seq_file: Add seq_vprintf function and export it

The existing seq_printf function is rewritten in terms of the new
seq_vprintf, which is also exported to modules. This allows GFS2
(and potentially other seq_file users) to have a vprintf-based
interface and to avoid an extra copy into a temporary buffer in
some cases.
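
(As an illustration only - not part of this patch - a seq_file user
with a varargs wrapper of its own, as GFS2 has in gfs2_print_dbg, can
then forward the va_list directly, along these lines:)

/* Hypothetical helper showing the intended calling pattern. */
static void my_seq_dbg(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	seq_vprintf(seq, fmt, args);	/* formats straight into seq->buf */
	va_end(args);
}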

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
---
 fs/seq_file.c            |   18 ++++++++++++++----
 include/linux/seq_file.h |    1 +
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/fs/seq_file.c b/fs/seq_file.c
index 0cbd049..14cf9de 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -385,15 +385,12 @@ int seq_escape(struct seq_file *m, const char *s, const char *esc)
 }
 EXPORT_SYMBOL(seq_escape);
 
-int seq_printf(struct seq_file *m, const char *f, ...)
+int seq_vprintf(struct seq_file *m, const char *f, va_list args)
 {
-	va_list args;
 	int len;
 
 	if (m->count < m->size) {
-		va_start(args, f);
 		len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
-		va_end(args);
 		if (m->count + len < m->size) {
 			m->count += len;
 			return 0;
@@ -402,6 +399,19 @@ int seq_printf(struct seq_file *m, const char *f, ...)
 	seq_set_overflow(m);
 	return -1;
 }
+EXPORT_SYMBOL(seq_vprintf);
+
+int seq_printf(struct seq_file *m, const char *f, ...)
+{
+	int ret;
+	va_list args;
+
+	va_start(args, f);
+	ret = seq_vprintf(m, f, args);
+	va_end(args);
+
+	return ret;
+}
 EXPORT_SYMBOL(seq_printf);
 
 /**
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index fc61854..83c44ee 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -86,6 +86,7 @@ int seq_puts(struct seq_file *m, const char *s);
 int seq_write(struct seq_file *seq, const void *data, size_t len);
 
 __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
+__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
 
 int seq_path(struct seq_file *, const struct path *, const char *);
 int seq_dentry(struct seq_file *, struct dentry *, const char *);
-- 
1.7.4






* [Cluster-devel] [PATCH] Add seq_vprintf and use in gfs2 (was Re: GFS2: Cache last hash bucket for glock seq_files)
  2012-06-11 10:21       ` [Cluster-devel] [PATCH] Add seq_vprintf and use in gfs2 (was Re: GFS2: Cache last hash bucket for glock seq_files) Steven Whitehouse
  2012-06-11 12:56         ` Steven Whitehouse
@ 2012-06-11 12:56         ` Steven Whitehouse
       [not found]         ` <1339413164.6001.2046.camel@edumazet-glaptop>
  2 siblings, 0 replies; 6+ messages in thread
From: Steven Whitehouse @ 2012-06-11 12:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

From 1bb49303b7a82eb9bce0595087523343683abdf0 Mon Sep 17 00:00:00 2001
From: Steven Whitehouse <swhiteho@redhat.com>
Date: Mon, 11 Jun 2012 13:26:50 +0100
Subject: [PATCH 2/2] GFS2: Use seq_vprintf for glocks debugfs file

Make use of the newly added seq_vprintf() function.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
---
 fs/gfs2/glock.c |    5 +----
 1 files changed, 1 insertions(+), 4 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 10ae164..4d5d63d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -51,7 +51,6 @@ struct gfs2_glock_iter {
 	struct gfs2_sbd *sdp;		/* incore superblock           */
 	struct gfs2_glock *gl;		/* current glock struct        */
 	loff_t last_pos;		/* last position               */
-	char string[512];		/* scratch space               */
 };
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
@@ -951,9 +950,7 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 	va_start(args, fmt);
 
 	if (seq) {
-		struct gfs2_glock_iter *gi = seq->private;
-		vsprintf(gi->string, fmt, args);
-		seq_puts(seq, gi->string);
+		seq_vprintf(seq, fmt, args);
 	} else {
 		vaf.fmt = fmt;
 		vaf.va = &args;
-- 
1.7.4






* [Cluster-devel] [PATCH] Add seq_vprintf and use in gfs2 (was Re: GFS2: Cache last hash bucket for glock seq_files)
       [not found]         ` <1339413164.6001.2046.camel@edumazet-glaptop>
@ 2012-06-11 13:32           ` Steven Whitehouse
  0 siblings, 0 replies; 6+ messages in thread
From: Steven Whitehouse @ 2012-06-11 13:32 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

On Mon, 2012-06-11 at 13:12 +0200, Eric Dumazet wrote:
> On Mon, 2012-06-11 at 11:21 +0100, Steven Whitehouse wrote:
> 
> > That's true, but are there any arches with a 64k page size? In any
> > case, I'll follow up with another patch for that rather than try to
> > combine it with this one...
> 
> Some arches have page size from 16K to 1MB in size
> 
> sh, frv, hexagon, tile, ia64, mips, microblaze, sparc64, ppc64...
> 
> 
> 
I can't imagine running GFS2 on many of those; sparc64 and/or ppc64
maybe, but it is rather unlikely on the others. Nevertheless, here is
a patch to ensure that we don't end up allocating too much memory.
There is no performance impact on x86_64, since the buffer size hasn't
changed in that case.
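
(Spelling out the arithmetic - assuming PAGE_ALLOC_COSTLY_ORDER is 3,
as in current mainline - with 4k pages the macro gives
min(4096 << 3, 65536) = 32768 bytes, i.e. the same 8*PAGE_SIZE as
before, while with 64k pages it gives min(65536 << 3, 65536) = 65536
bytes rather than 512k.)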

Hopefully this is the last bit for this set of patches. Many thanks
for taking the time to look at this - things should be much improved
from my initial idea, and I think that our QE and support teams will
be happy with the result,

Steve.


From 0fe2f1e929ecabf834f4af2ffd300fe70700f4b3 Mon Sep 17 00:00:00 2001
From: Steven Whitehouse <swhiteho@redhat.com>
Date: Mon, 11 Jun 2012 13:49:47 +0100
Subject: [PATCH] GFS2: Size seq_file buffer more carefully

This places a limit on the buffer size for archs with larger
PAGE_SIZE.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Reported-by: Eric Dumazet <eric.dumazet@gmail.com>

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4d5d63d..1ed81f4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1977,6 +1977,8 @@ static const struct seq_operations gfs2_sbstats_seq_ops = {
 	.show  = gfs2_sbstats_seq_show,
 };
 
+#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
+
 static int gfs2_glocks_open(struct inode *inode, struct file *file)
 {
 	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
@@ -1985,9 +1987,9 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
 		struct seq_file *seq = file->private_data;
 		struct gfs2_glock_iter *gi = seq->private;
 		gi->sdp = inode->i_private;
-		seq->buf = kmalloc(8*PAGE_SIZE, GFP_KERNEL | __GFP_NOWARN);
+		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
 		if (seq->buf)
-			seq->size = 8*PAGE_SIZE;
+			seq->size = GFS2_SEQ_GOODSIZE;
 	}
 	return ret;
 }
@@ -2000,9 +2002,9 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
 		struct seq_file *seq = file->private_data;
 		struct gfs2_glock_iter *gi = seq->private;
 		gi->sdp = inode->i_private;
-		seq->buf = kmalloc(8*PAGE_SIZE, GFP_KERNEL | __GFP_NOWARN);
+		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
 		if (seq->buf)
-			seq->size = 8*PAGE_SIZE;
+			seq->size = GFS2_SEQ_GOODSIZE;
 	}
 	return ret;
 }
-- 
1.7.4





