public inbox for linux-xfs@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] libxfs: stop caching inode structures
@ 2012-02-07 18:22 Christoph Hellwig
  2012-02-08  5:11 ` Dave Chinner
  0 siblings, 1 reply; 7+ messages in thread
From: Christoph Hellwig @ 2012-02-07 18:22 UTC (permalink / raw)
  To: xfs

Currently libxfs has a cache for xfs_inode structures.  Unlike in kernelspace
where the inode cache, and the associated page cache for file data is used
for all filesystem operations the libxfs inode cache is only used in few
places:

 - the libxfs init code reads the root and realtime inodes when called from
   xfs_db using a special flag, but these inode structure are never referenced
   again
 - mkfs uses namespace and bmap routines that take the xfs_inode structure
   to create the root and realtime inodes, as well as any additional files
   specified in the proto file
 - the xfs_db attr code uses xfs_inode-based attr routines in the attrset
   and attrget commands
 - phase6 of xfs_repair uses xfs_inode-based routines for rebuilding
   directories and moving files to the lost+found directory.
 - phase7 of xfs_repair uses struct xfs_inode to modify the nlink count
   of inodes.

So except in repair we never ever reuse a cached inode, and in repair we can
easily read the information from the more compact cached buffers (or even
better rewrite phase7 to operate on the raw on-disk inodes).  Given these
facts stop caching the inodes to reduce memory usage especially in xfs_repair.

With this we probably could increase the memory available to the buffer
cache in xfs_repair, but trying to do so I got a bit lost - the current
formula seems too magic to me to make any sense of, and simply doubling the
buffer cache size causes us to run out of memory given that the data cached
in the buffer cache (typically lots of 8k inode buffers and few 4k other
metadata buffers) are much bigger than the inodes cached in the inode
cache.  We probably need a sizing scheme that takes the actual amount
of memory allocated to the buffer cache into account to solve this better.

Signed-off-by: Christoph Hellwig <hch@lst.de>

Index: xfsprogs-dev/include/libxfs.h
===================================================================
--- xfsprogs-dev.orig/include/libxfs.h	2012-02-06 15:22:51.000000000 +0000
+++ xfsprogs-dev/include/libxfs.h	2012-02-06 15:31:05.000000000 +0000
@@ -207,7 +207,6 @@ typedef struct xfs_mount {
 #define LIBXFS_MOUNT_COMPAT_ATTR	0x0010
 #define LIBXFS_MOUNT_ATTR2		0x0020
 
-#define LIBXFS_IHASHSIZE(sbp)		(1<<10)
 #define LIBXFS_BHASHSIZE(sbp) 		(1<<10)
 
 extern xfs_mount_t	*libxfs_mount (xfs_mount_t *, xfs_sb_t *,
@@ -335,7 +334,6 @@ extern int	libxfs_writebuf_int(xfs_buf_t
 extern int	libxfs_readbufr(dev_t, xfs_daddr_t, xfs_buf_t *, int, int);
 
 extern int libxfs_bhash_size;
-extern int libxfs_ihash_size;
 
 #define LIBXFS_BREAD	0x1
 #define LIBXFS_BWRITE	0x2
@@ -455,9 +453,6 @@ extern int	libxfs_iread (xfs_mount_t *,
 				xfs_inode_t *, xfs_daddr_t);
 
 /* Inode Cache Interfaces */
-extern struct cache	*libxfs_icache;
-extern struct cache_operations	libxfs_icache_operations;
-extern void	libxfs_icache_purge (void);
 extern int	libxfs_iget (xfs_mount_t *, xfs_trans_t *, xfs_ino_t,
 				uint, xfs_inode_t **, xfs_daddr_t);
 extern void	libxfs_iput (xfs_inode_t *, uint);
Index: xfsprogs-dev/libxfs/init.c
===================================================================
--- xfsprogs-dev.orig/libxfs/init.c	2012-02-06 15:22:51.000000000 +0000
+++ xfsprogs-dev/libxfs/init.c	2012-02-06 15:30:51.000000000 +0000
@@ -22,9 +22,6 @@
 
 char *progname = "libxfs";	/* default, changed by each tool */
 
-struct cache *libxfs_icache;	/* global inode cache */
-int libxfs_ihash_size;		/* #buckets in icache */
-
 struct cache *libxfs_bcache;	/* global buffer cache */
 int libxfs_bhash_size;		/* #buckets in bcache */
 
@@ -333,9 +330,6 @@ libxfs_init(libxfs_init_t *a)
 	}
 	if (needcd)
 		chdir(curdir);
-	if (!libxfs_ihash_size)
-		libxfs_ihash_size = LIBXFS_IHASHSIZE(sbp);
-	libxfs_icache = cache_init(libxfs_ihash_size, &libxfs_icache_operations);
 	if (!libxfs_bhash_size)
 		libxfs_bhash_size = LIBXFS_BHASHSIZE(sbp);
 	libxfs_bcache = cache_init(libxfs_bhash_size, &libxfs_bcache_operations);
@@ -817,7 +811,6 @@ libxfs_umount(xfs_mount_t *mp)
 	int			agno;
 
 	libxfs_rtmount_destroy(mp);
-	libxfs_icache_purge();
 	libxfs_bcache_purge();
 
 	for (agno = 0; agno < mp->m_maxagi; agno++) {
@@ -833,7 +826,6 @@ void
 libxfs_destroy(void)
 {
 	manage_zones(1);
-	cache_destroy(libxfs_icache);
 	cache_destroy(libxfs_bcache);
 }
 
@@ -849,7 +841,6 @@ libxfs_report(FILE *fp)
 	time_t t;
 	char *c;
 
-	cache_report(fp, "libxfs_icache", libxfs_icache);
 	cache_report(fp, "libxfs_bcache", libxfs_bcache);
 
 	t = time(NULL);
Index: xfsprogs-dev/libxfs/rdwr.c
===================================================================
--- xfsprogs-dev.orig/libxfs/rdwr.c	2012-02-06 15:22:07.000000000 +0000
+++ xfsprogs-dev/libxfs/rdwr.c	2012-02-06 15:29:03.000000000 +0000
@@ -710,58 +710,29 @@ struct cache_operations libxfs_bcache_op
 
 
 /*
- * Inode cache interfaces
+ * Inode cache stubs.
  */
 
 extern kmem_zone_t	*xfs_ili_zone;
 extern kmem_zone_t	*xfs_inode_zone;
 
-static unsigned int
-libxfs_ihash(cache_key_t key, unsigned int hashsize)
-{
-	return ((unsigned int)*(xfs_ino_t *)key) % hashsize;
-}
-
-static int
-libxfs_icompare(struct cache_node *node, cache_key_t key)
-{
-	xfs_inode_t	*ip = (xfs_inode_t *)node;
-
-	return (ip->i_ino == *(xfs_ino_t *)key);
-}
-
 int
 libxfs_iget(xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, uint lock_flags,
 		xfs_inode_t **ipp, xfs_daddr_t bno)
 {
-	xfs_inode_t	*ip;
 	int		error = 0;
 
-	if (cache_node_get(libxfs_icache, &ino, (struct cache_node **)&ip)) {
-#ifdef INO_DEBUG
-		fprintf(stderr, "%s: allocated inode, ino=%llu(%llu), %p\n",
-			__FUNCTION__, (unsigned long long)ino, bno, ip);
-#endif
-		if ((error = libxfs_iread(mp, tp, ino, ip, bno))) {
-			cache_node_purge(libxfs_icache, &ino,
-					(struct cache_node *)ip);
-			ip = NULL;
-		}
+	*ipp = kmem_zone_zalloc(xfs_inode_zone, 0);
+	if (!*ipp)
+		return ENOMEM;
+
+	error = libxfs_iread(mp, tp, ino, *ipp, bno);
+	if (error) {
+		kmem_zone_free(xfs_inode_zone, *ipp);
+		*ipp = NULL;
 	}
-	*ipp = ip;
-	return error;
-}
-
-void
-libxfs_iput(xfs_inode_t *ip, uint lock_flags)
-{
-	cache_node_put(libxfs_icache, (struct cache_node *)ip);
-}
 
-static struct cache_node *
-libxfs_ialloc(cache_key_t key)
-{
-	return kmem_zone_zalloc(xfs_inode_zone, 0);
+	return error;
 }
 
 static void
@@ -778,32 +749,12 @@ libxfs_idestroy(xfs_inode_t *ip)
 		libxfs_idestroy_fork(ip, XFS_ATTR_FORK);
 }
 
-static void
-libxfs_irelse(struct cache_node *node)
-{
-	xfs_inode_t	*ip = (xfs_inode_t *)node;
-
-	if (ip != NULL) {
-		if (ip->i_itemp)
-			kmem_zone_free(xfs_ili_zone, ip->i_itemp);
-		ip->i_itemp = NULL;
-		libxfs_idestroy(ip);
-		kmem_zone_free(xfs_inode_zone, ip);
-		ip = NULL;
-	}
-}
-
 void
-libxfs_icache_purge(void)
+libxfs_iput(xfs_inode_t *ip, uint lock_flags)
 {
-	cache_purge(libxfs_icache);
+	if (ip->i_itemp)
+		kmem_zone_free(xfs_ili_zone, ip->i_itemp);
+	ip->i_itemp = NULL;
+	libxfs_idestroy(ip);
+	kmem_zone_free(xfs_inode_zone, ip);
 }
-
-struct cache_operations libxfs_icache_operations = {
-	/* .hash */	libxfs_ihash,
-	/* .alloc */	libxfs_ialloc,
-	/* .flush */	NULL,
-	/* .relse */	libxfs_irelse,
-	/* .compare */	libxfs_icompare,
-	/* .bulkrelse */ NULL
-};
Index: xfsprogs-dev/mkfs/xfs_mkfs.c
===================================================================
--- xfsprogs-dev.orig/mkfs/xfs_mkfs.c	2012-02-06 15:22:51.000000000 +0000
+++ xfsprogs-dev/mkfs/xfs_mkfs.c	2012-02-06 15:22:59.000000000 +0000
@@ -2698,7 +2698,6 @@ an AG size that is one stripe unit small
 	 * Need to drop references to inodes we still hold, first.
 	 */
 	libxfs_rtmount_destroy(mp);
-	libxfs_icache_purge();
 	libxfs_bcache_purge();
 
 	/*
Index: xfsprogs-dev/repair/xfs_repair.c
===================================================================
--- xfsprogs-dev.orig/repair/xfs_repair.c	2012-02-06 15:22:51.000000000 +0000
+++ xfsprogs-dev/repair/xfs_repair.c	2012-02-06 15:38:25.000000000 +0000
@@ -69,7 +69,6 @@ static char *c_opts[] = {
 };
 
 
-static int	ihash_option_used;
 static int	bhash_option_used;
 static long	max_mem_specified;	/* in megabytes */
 static int	phase2_threads = 32;
@@ -239,13 +238,13 @@ process_args(int argc, char **argv)
 					pre_65_beta = 1;
 					break;
 				case IHASH_SIZE:
-					libxfs_ihash_size = (int)strtol(val, NULL, 0);
-					ihash_option_used = 1;
+					do_warn(
+		_("-o ihash option has been removed and will be ignored\n"));
 					break;
 				case BHASH_SIZE:
 					if (max_mem_specified)
 						do_abort(
-			_("-o bhash option cannot be used with -m option\n"));
+		_("-o bhash option cannot be used with -m option\n"));
 					libxfs_bhash_size = (int)strtol(val, NULL, 0);
 					bhash_option_used = 1;
 					break;
@@ -646,9 +645,7 @@ main(int argc, char **argv)
 		unsigned long	max_mem;
 		struct rlimit	rlim;
 
-		libxfs_icache_purge();
 		libxfs_bcache_purge();
-		cache_destroy(libxfs_icache);
 		cache_destroy(libxfs_bcache);
 
 		mem_used = (mp->m_sb.sb_icount >> (10 - 2)) +
@@ -707,11 +704,6 @@ main(int argc, char **argv)
 			do_log(_("        - block cache size set to %d entries\n"),
 				libxfs_bhash_size * HASH_CACHE_RATIO);
 
-		if (!ihash_option_used)
-			libxfs_ihash_size = libxfs_bhash_size;
-
-		libxfs_icache = cache_init(libxfs_ihash_size,
-						&libxfs_icache_operations);
 		libxfs_bcache = cache_init(libxfs_bhash_size,
 						&libxfs_bcache_operations);
 	}
Index: xfsprogs-dev/man/man8/xfs_repair.8
===================================================================
--- xfsprogs-dev.orig/man/man8/xfs_repair.8	2012-02-06 15:32:57.000000000 +0000
+++ xfsprogs-dev/man/man8/xfs_repair.8	2012-02-06 15:33:08.000000000 +0000
@@ -130,12 +130,6 @@ The
 supported are:
 .RS 1.0i
 .TP
-.BI ihash= ihashsize
-overrides the default inode cache hash size. The total number of
-inode cache entries are limited to 8 times this amount. The default
-.I ihashsize
-is 1024 (for a total of 8192 entries).
-.TP
 .BI bhash= bhashsize
 overrides the default buffer cache hash size. The total number of
 buffer cache entries are limited to 8 times this amount. The default

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] libxfs: stop caching inode structures
  2012-02-07 18:22 Christoph Hellwig
@ 2012-02-08  5:11 ` Dave Chinner
  2012-02-09 18:01   ` Christoph Hellwig
  0 siblings, 1 reply; 7+ messages in thread
From: Dave Chinner @ 2012-02-08  5:11 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Tue, Feb 07, 2012 at 01:22:28PM -0500, Christoph Hellwig wrote:
> Currently libxfs has a cache for xfs_inode structures.  Unlike in kernelspace
> where the inode cache, and the associated page cache for file data is used
> for all filesystem operations the libxfs inode cache is only used in few
> places:
> 
>  - the libxfs init code reads the root and realtime inodes when called from
>    xfs_db using a special flag, but these inode structure are never referenced
>    again
>  - mkfs uses namespace and bmap routines that take the xfs_inode structure
>    to create the root and realtime inodes, as well as any additional files
>    specified in the proto file
>  - the xfs_db attr code uses xfs_inode-based attr routines in the attrset
>    and attrget commands
>  - phase6 of xfs_repair uses xfs_inode-based routines for rebuilding
>    directories and moving files to the lost+found directory.
>  - phase7 of xfs_repair uses struct xfs_inode to modify the nlink count
>    of inodes.
> 
> So except in repair we never ever reuse a cached inode, and in repair we can
> easily read the information from the more compact cached buffers (or even
> better rewrite phase7 to operate on the raw on-disk inodes).  Given these
> facts stop caching the inodes to reduce memory usage especially in xfs_repair.

Ok, so what does it do to the speed of phase6 and phase7 of repair?
How much CPU overhead does this add to every inode lookup done in
these phases?

Indeed, there are cases where individual inode caching is much more
memory efficient than keeping the buffers around (think sparse inode
chunks on disk where only a few of the 64 inodes are actually
allocated). Tracking them in buffers (esp. if the inode size is
large) could use a lot more memory than just caching the active
inodes in a struct xfs_inode. Hence I'm not so sure this is clear
cut win for memory usage.

Do you have any numbers for memory usage or performance?

The code changes are simple enough, so if it is actually a win then
I see no problems with doing this. But that's what I need more
information about to be convinced on....

> With this we probably could increase the memory available to the buffer
> cache in xfs_repair, but trying to do so I got a bit lost - the current
> formula seems to magic to me to make any sense, and simply doubling the
> buffer cache size causes us to run out of memory given that the data cached

IIRC, that's because the current formula sets the buffer cache size
to 75% of physical RAM on the machine. Doubling it will definitely
cause problems ;)

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] libxfs: stop caching inode structures
  2012-02-08  5:11 ` Dave Chinner
@ 2012-02-09 18:01   ` Christoph Hellwig
  0 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2012-02-09 18:01 UTC (permalink / raw)
  To: Dave Chinner; +Cc: xfs

On Wed, Feb 08, 2012 at 04:11:26PM +1100, Dave Chinner wrote:
> Ok, so what does it do to the speed of phase6 and phase7 of repair?
> How much CPU overhead does this add to every inode lookup done in
> these phases?

I'm away from my test system, but on the tons of inodes filesystems it
actually slightly improved their speed, probably because the box
was swapping less, or we spent less time in inode cache doing cache
misses as we'd never actually have the inode we care about cached.

The reason why the individual inode cache here doesn't work is because
we only ever touch inodes in phase7 if we are going to modify them
and write them out, so we absolutely need the backing buffer anyway.

I can't see how phase6 benefits from the logical inode cache either,
given it's structure:

 - in phase 6a we iterate over each inode in the incore inode tree,
   and if it's a directory check/rebuild it
 - phase6b then updates the "." and ".." entries for directories
   that need it, which means we require the backing buffers.
 - phase6c moves disconnected inodes to lost_found, which again needs
   the backing buffer to actually do anything.

In short there is no code in repair that benefits from doing logical
inode caching.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH] libxfs: stop caching inode structures
@ 2013-10-09 13:02 Christoph Hellwig
  2013-10-14 20:16 ` Dave Chinner
  2013-10-31 15:43 ` Christoph Hellwig
  0 siblings, 2 replies; 7+ messages in thread
From: Christoph Hellwig @ 2013-10-09 13:02 UTC (permalink / raw)
  To: xfs

Currently libxfs has a cache for xfs_inode structures.  Unlike in kernelspace
where the inode cache, and the associated page cache for file data is used
for all filesystem operations the libxfs inode cache is only used in few
places:

 - the libxfs init code reads the root and realtime inodes when called from
   xfs_db using a special flag, but these inode structure are never referenced
   again
 - mkfs uses namespace and bmap routines that take the xfs_inode structure
   to create the root and realtime inodes, as well as any additional files
   specified in the proto file
 - the xfs_db attr code uses xfs_inode-based attr routines in the attrset
   and attrget commands
 - phase6 of xfs_repair uses xfs_inode-based routines for rebuilding
   directories and moving files to the lost+found directory.
 - phase7 of xfs_repair uses struct xfs_inode to modify the nlink count
   of inodes.

So except in repair we never ever reuse a cached inode, and even in repair
the logical inode caching doesn't help:

 - in phase 6a we iterate over each inode in the incore inode tree,
   and if it's a directory check/rebuild it
 - phase6b then updates the "." and ".." entries for directories
   that need it, which means we require the backing buffers.
 - phase6c moves disconnected inodes to lost_found, which again needs
   the backing buffer to actually do anything.
 - phase7 then only touches inodes for which we need to reset i_nlink,
   which always involves reading, modifying and writing the physical
   inode.
   which always involves modifying the . and .. entries.

Given these facts stop caching the inodes to reduce memory usage
especially in xfs_repair, where this makes a difference for large-inode-count
filesystems.  On the upper end this allows repair to complete for
filesystem / amount of memory combinations that previously wouldn't.

With this we probably could increase the memory available to the buffer
cache in xfs_repair, but trying to do so I got a bit lost - the current
formula seems too magic to me to make any sense of, and simply doubling the
buffer cache size causes us to run out of memory given that the data cached
in the buffer cache (typically lots of 8k inode buffers and few 4k other
metadata buffers) are much bigger than the inodes cached in the inode
cache.  We probably need a sizing scheme that takes the actual amount
of memory allocated to the buffer cache into account to solve this better.

Signed-off-by: Christoph Hellwig <hch@lst.de>

---
 include/libxfs.h      |    5 --
 libxfs/init.c         |    9 -----
 libxfs/rdwr.c         |   87 +++++++++++---------------------------------------
 man/man8/xfs_repair.8 |    6 ---
 mkfs/xfs_mkfs.c       |    1 
 repair/xfs_repair.c   |   14 +-------
 6 files changed, 23 insertions(+), 99 deletions(-)

Index: xfsprogs/include/libxfs.h
===================================================================
--- xfsprogs.orig/include/libxfs.h	2013-10-09 12:36:31.000000000 +0000
+++ xfsprogs/include/libxfs.h	2013-10-09 12:40:20.000000000 +0000
@@ -257,7 +257,6 @@
 #define LIBXFS_MOUNT_COMPAT_ATTR	0x0010
 #define LIBXFS_MOUNT_ATTR2		0x0020
 
-#define LIBXFS_IHASHSIZE(sbp)		(1<<10)
 #define LIBXFS_BHASHSIZE(sbp) 		(1<<10)
 
 extern xfs_mount_t	*libxfs_mount (xfs_mount_t *, xfs_sb_t *,
@@ -440,7 +439,6 @@
 extern int	libxfs_readbufr(struct xfs_buftarg *, xfs_daddr_t, xfs_buf_t *, int, int);
 
 extern int libxfs_bhash_size;
-extern int libxfs_ihash_size;
 
 #define LIBXFS_BREAD	0x1
 #define LIBXFS_BWRITE	0x2
@@ -640,9 +638,6 @@
 extern int	libxfs_iflush_int (xfs_inode_t *, xfs_buf_t *);
 
 /* Inode Cache Interfaces */
-extern struct cache	*libxfs_icache;
-extern struct cache_operations	libxfs_icache_operations;
-extern void	libxfs_icache_purge (void);
 extern int	libxfs_iget (xfs_mount_t *, xfs_trans_t *, xfs_ino_t,
 				uint, xfs_inode_t **, xfs_daddr_t);
 extern void	libxfs_iput (xfs_inode_t *, uint);
Index: xfsprogs/libxfs/init.c
===================================================================
--- xfsprogs.orig/libxfs/init.c	2013-10-09 12:36:31.000000000 +0000
+++ xfsprogs/libxfs/init.c	2013-10-09 12:40:20.000000000 +0000
@@ -22,9 +22,6 @@
 
 char *progname = "libxfs";	/* default, changed by each tool */
 
-struct cache *libxfs_icache;	/* global inode cache */
-int libxfs_ihash_size;		/* #buckets in icache */
-
 struct cache *libxfs_bcache;	/* global buffer cache */
 int libxfs_bhash_size;		/* #buckets in bcache */
 
@@ -335,9 +332,6 @@
 	}
 	if (needcd)
 		chdir(curdir);
-	if (!libxfs_ihash_size)
-		libxfs_ihash_size = LIBXFS_IHASHSIZE(sbp);
-	libxfs_icache = cache_init(libxfs_ihash_size, &libxfs_icache_operations);
 	if (!libxfs_bhash_size)
 		libxfs_bhash_size = LIBXFS_BHASHSIZE(sbp);
 	libxfs_bcache = cache_init(libxfs_bhash_size, &libxfs_bcache_operations);
@@ -866,7 +860,6 @@
 	int			agno;
 
 	libxfs_rtmount_destroy(mp);
-	libxfs_icache_purge();
 	libxfs_bcache_purge();
 
 	for (agno = 0; agno < mp->m_maxagi; agno++) {
@@ -882,7 +875,6 @@
 libxfs_destroy(void)
 {
 	manage_zones(1);
-	cache_destroy(libxfs_icache);
 	cache_destroy(libxfs_bcache);
 }
 
@@ -898,7 +890,6 @@
 	time_t t;
 	char *c;
 
-	cache_report(fp, "libxfs_icache", libxfs_icache);
 	cache_report(fp, "libxfs_bcache", libxfs_bcache);
 
 	t = time(NULL);
Index: xfsprogs/libxfs/rdwr.c
===================================================================
--- xfsprogs.orig/libxfs/rdwr.c	2013-10-09 12:36:31.000000000 +0000
+++ xfsprogs/libxfs/rdwr.c	2013-10-09 12:46:09.000000000 +0000
@@ -993,26 +993,12 @@
 
 
 /*
- * Inode cache interfaces
+ * Inode cache stubs.
  */
 
 extern kmem_zone_t	*xfs_ili_zone;
 extern kmem_zone_t	*xfs_inode_zone;
 
-static unsigned int
-libxfs_ihash(cache_key_t key, unsigned int hashsize)
-{
-	return ((unsigned int)*(xfs_ino_t *)key) % hashsize;
-}
-
-static int
-libxfs_icompare(struct cache_node *node, cache_key_t key)
-{
-	xfs_inode_t	*ip = (xfs_inode_t *)node;
-
-	return (ip->i_ino == *(xfs_ino_t *)key);
-}
-
 int
 libxfs_iget(xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, uint lock_flags,
 		xfs_inode_t **ipp, xfs_daddr_t bno)
@@ -1020,34 +1006,21 @@
 	xfs_inode_t	*ip;
 	int		error = 0;
 
-	if (cache_node_get(libxfs_icache, &ino, (struct cache_node **)&ip)) {
-#ifdef INO_DEBUG
-		fprintf(stderr, "%s: allocated inode, ino=%llu(%llu), %p\n",
-			__FUNCTION__, (unsigned long long)ino, bno, ip);
-#endif
-		ip->i_ino = ino;
-		ip->i_mount = mp;
-		error = xfs_iread(mp, tp, ip, bno);
-		if (error) {
-			cache_node_purge(libxfs_icache, &ino,
-					(struct cache_node *)ip);
-			ip = NULL;
-		}
+	ip = kmem_zone_zalloc(xfs_inode_zone, 0);
+	if (!ip)
+		return ENOMEM;
+
+	ip->i_ino = ino;
+	ip->i_mount = mp;
+	error = xfs_iread(mp, tp, ip, bno);
+	if (error) {
+		kmem_zone_free(xfs_inode_zone, ip);
+		*ipp = NULL;
+		return error;
 	}
-	*ipp = ip;
-	return error;
-}
-
-void
-libxfs_iput(xfs_inode_t *ip, uint lock_flags)
-{
-	cache_node_put(libxfs_icache, (struct cache_node *)ip);
-}
 
-static struct cache_node *
-libxfs_ialloc(cache_key_t key)
-{
-	return kmem_zone_zalloc(xfs_inode_zone, 0);
+	*ipp = ip;
+	return 0;
 }
 
 static void
@@ -1064,32 +1037,12 @@
 		libxfs_idestroy_fork(ip, XFS_ATTR_FORK);
 }
 
-static void
-libxfs_irelse(struct cache_node *node)
-{
-	xfs_inode_t	*ip = (xfs_inode_t *)node;
-
-	if (ip != NULL) {
-		if (ip->i_itemp)
-			kmem_zone_free(xfs_ili_zone, ip->i_itemp);
-		ip->i_itemp = NULL;
-		libxfs_idestroy(ip);
-		kmem_zone_free(xfs_inode_zone, ip);
-		ip = NULL;
-	}
-}
-
 void
-libxfs_icache_purge(void)
+libxfs_iput(xfs_inode_t *ip, uint lock_flags)
 {
-	cache_purge(libxfs_icache);
+	if (ip->i_itemp)
+		kmem_zone_free(xfs_ili_zone, ip->i_itemp);
+	ip->i_itemp = NULL;
+	libxfs_idestroy(ip);
+	kmem_zone_free(xfs_inode_zone, ip);
 }
-
-struct cache_operations libxfs_icache_operations = {
-	/* .hash */	libxfs_ihash,
-	/* .alloc */	libxfs_ialloc,
-	/* .flush */	NULL,
-	/* .relse */	libxfs_irelse,
-	/* .compare */	libxfs_icompare,
-	/* .bulkrelse */ NULL
-};
Index: xfsprogs/mkfs/xfs_mkfs.c
===================================================================
--- xfsprogs.orig/mkfs/xfs_mkfs.c	2013-10-09 12:36:31.000000000 +0000
+++ xfsprogs/mkfs/xfs_mkfs.c	2013-10-09 12:40:20.000000000 +0000
@@ -2909,7 +2909,6 @@
 	 * Need to drop references to inodes we still hold, first.
 	 */
 	libxfs_rtmount_destroy(mp);
-	libxfs_icache_purge();
 	libxfs_bcache_purge();
 
 	/*
Index: xfsprogs/repair/xfs_repair.c
===================================================================
--- xfsprogs.orig/repair/xfs_repair.c	2013-09-09 14:34:49.000000000 +0000
+++ xfsprogs/repair/xfs_repair.c	2013-10-09 12:40:20.000000000 +0000
@@ -69,7 +69,6 @@
 };
 
 
-static int	ihash_option_used;
 static int	bhash_option_used;
 static long	max_mem_specified;	/* in megabytes */
 static int	phase2_threads = 32;
@@ -239,13 +238,13 @@
 					pre_65_beta = 1;
 					break;
 				case IHASH_SIZE:
-					libxfs_ihash_size = (int)strtol(val, NULL, 0);
-					ihash_option_used = 1;
+					do_warn(
+		_("-o ihash option has been removed and will be ignored\n"));
 					break;
 				case BHASH_SIZE:
 					if (max_mem_specified)
 						do_abort(
-			_("-o bhash option cannot be used with -m option\n"));
+		_("-o bhash option cannot be used with -m option\n"));
 					libxfs_bhash_size = (int)strtol(val, NULL, 0);
 					bhash_option_used = 1;
 					break;
@@ -648,9 +647,7 @@
 		unsigned long	max_mem;
 		struct rlimit	rlim;
 
-		libxfs_icache_purge();
 		libxfs_bcache_purge();
-		cache_destroy(libxfs_icache);
 		cache_destroy(libxfs_bcache);
 
 		mem_used = (mp->m_sb.sb_icount >> (10 - 2)) +
@@ -709,11 +706,6 @@
 			do_log(_("        - block cache size set to %d entries\n"),
 				libxfs_bhash_size * HASH_CACHE_RATIO);
 
-		if (!ihash_option_used)
-			libxfs_ihash_size = libxfs_bhash_size;
-
-		libxfs_icache = cache_init(libxfs_ihash_size,
-						&libxfs_icache_operations);
 		libxfs_bcache = cache_init(libxfs_bhash_size,
 						&libxfs_bcache_operations);
 	}
Index: xfsprogs/man/man8/xfs_repair.8
===================================================================
--- xfsprogs.orig/man/man8/xfs_repair.8	2013-09-09 14:34:49.000000000 +0000
+++ xfsprogs/man/man8/xfs_repair.8	2013-10-09 12:40:20.000000000 +0000
@@ -130,12 +130,6 @@
 supported are:
 .RS 1.0i
 .TP
-.BI ihash= ihashsize
-overrides the default inode cache hash size. The total number of
-inode cache entries are limited to 8 times this amount. The default
-.I ihashsize
-is 1024 (for a total of 8192 entries).
-.TP
 .BI bhash= bhashsize
 overrides the default buffer cache hash size. The total number of
 buffer cache entries are limited to 8 times this amount. The default

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] libxfs: stop caching inode structures
  2013-10-09 13:02 [PATCH] libxfs: stop caching inode structures Christoph Hellwig
@ 2013-10-14 20:16 ` Dave Chinner
  2013-10-15 16:06   ` Christoph Hellwig
  2013-10-31 15:43 ` Christoph Hellwig
  1 sibling, 1 reply; 7+ messages in thread
From: Dave Chinner @ 2013-10-14 20:16 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: xfs

On Wed, Oct 09, 2013 at 06:02:41AM -0700, Christoph Hellwig wrote:
> Currently libxfs has a cache for xfs_inode structures.  Unlike in kernelspace
> where the inode cache, and the associated page cache for file data is used
> for all filesystem operations the libxfs inode cache is only used in few
> places:
> 
>  - the libxfs init code reads the root and realtime inodes when called from
>    xfs_db using a special flag, but these inode structure are never referenced
>    again
>  - mkfs uses namespace and bmap routines that take the xfs_inode structure
>    to create the root and realtime inodes, as well as any additional files
>    specified in the proto file
>  - the xfs_db attr code uses xfs_inode-based attr routines in the attrset
>    and attrget commands
>  - phase6 of xfs_repair uses xfs_inode-based routines for rebuilding
>    directories and moving files to the lost+found directory.
>  - phase7 of xfs_repair uses struct xfs_inode to modify the nlink count
>    of inodes.
> 
> So except in repair we never ever reuse a cached inode, and even in repair
> the logical inode caching doesn't help:
> 
>  - in phase 6a we iterate over each inode in the incore inode tree,
>    and if it's a directory check/rebuild it
>  - phase6b then updates the "." and ".." entries for directories
>    that need, which means we require the backing buffers.
>  - phase6c moves disconnected inodes to lost_found, which again needs
>    the backing buffer to actually do anything.
>  - phase7 then only touches inodes for which we need to reset i_nlink,
>    which always involves reading, modifying and writing the physical
>    inode.
>    which always involves modifying the . and .. entries.
> 
> Given these facts stop caching the inodes to reduce memory usage
> especially in xfs_repair, where this makes a different for large inode
> count inodes.  On the upper end this allows repair to complete for
> filesystem / amount of memory combinations that previously wouldn't.

This all sounds good and the code looks fine, but there's one
lingering question I have - what's the impact on performance for
repair? Does it slow down phase 6/7 at all?

> With this we probably could increase the memory available to the buffer
> cache in xfs_repair, but trying to do so I got a bit lost - the current
> formula seems to magic to me to make any sense, and simply doubling the
> buffer cache size causes us to run out of memory given that the data cached
> in the buffer cache (typically lots of 8k inode buffers and few 4k other
> metadata buffers) are much bigger than the inodes cached in the inode
> cache.  We probably need a sizing scheme that takes the actual amount
> of memory allocated to the buffer cache into account to solve this better.

IIRC, the size of the buffer cache is currently set at 75% of RAM
so doubling it would cause OOM issues regardless of the presence of
the inode cache....

Cheers,

Dave.

-- 
Dave Chinner
david@fromorbit.com

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] libxfs: stop caching inode structures
  2013-10-14 20:16 ` Dave Chinner
@ 2013-10-15 16:06   ` Christoph Hellwig
  0 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2013-10-15 16:06 UTC (permalink / raw)
  To: Dave Chinner; +Cc: Christoph Hellwig, xfs

On Tue, Oct 15, 2013 at 07:16:59AM +1100, Dave Chinner wrote:
> This all sounds good and the code looks fine, but there's one
> lingering question I have - what's the impact on performance for
> repair? Does it slow down phase 6/7 at all?

I have to admit that I'm just pulling memory from my hat as this is a
repost of an almost 1 year old patch, and I don't have equipment for
large scale performance testing at the moment.

But the biggest speedups I had seen was in filesystems where we had to
delete lots of inodes and thus manipulate the link count of hundreds of
thousands of directories in phase7 - with the current code we thrash the
inode cache badly there and got into deep swapping, and with this patch
we removed that thrashing (and often got the inodes from the buffer
cache still) and got rid of the swapping, causing speedups up to about
10%.  "Up to" because the numbers for the previous case weren't too
reliable.

Not sure if my wording in the description wasn't clear enough but I
really can't come up with a case where the inode cache would help
in repair - all the users of it will eventually modify the inode and
thus hit the buffers anyway, so any unlikely help during lookup would
still be moot once we write back.

Actually I'll have to correct myself after going through the scenarious
another time - the no_modify case might get hit a little, but I don't
think it's worth optimizing for that.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] libxfs: stop caching inode structures
  2013-10-09 13:02 [PATCH] libxfs: stop caching inode structures Christoph Hellwig
  2013-10-14 20:16 ` Dave Chinner
@ 2013-10-31 15:43 ` Christoph Hellwig
  1 sibling, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2013-10-31 15:43 UTC (permalink / raw)
  To: xfs

ping?

On Wed, Oct 09, 2013 at 06:02:41AM -0700, Christoph Hellwig wrote:
> Currently libxfs has a cache for xfs_inode structures.  Unlike in kernelspace
> where the inode cache, and the associated page cache for file data is used
> for all filesystem operations the libxfs inode cache is only used in a few
> places:
> 
>  - the libxfs init code reads the root and realtime inodes when called from
>    xfs_db using a special flag, but these inode structure are never referenced
>    again
>  - mkfs uses namespace and bmap routines that take the xfs_inode structure
>    to create the root and realtime inodes, as well as any additional files
>    specified in the proto file
>  - the xfs_db attr code uses xfs_inode-based attr routines in the attrset
>    and attrget commands
>  - phase6 of xfs_repair uses xfs_inode-based routines for rebuilding
>    directories and moving files to the lost+found directory.
>  - phase7 of xfs_repair uses struct xfs_inode to modify the nlink count
>    of inodes.
> 
> So except in repair we never ever reuse a cached inode, and even in repair
> the logical inode caching doesn't help:
> 
>  - in phase 6a we iterate over each inode in the incore inode tree,
>    and if it's a directory check/rebuild it
>  - phase6b then updates the "." and ".." entries for directories
>    that need it, which means we require the backing buffers.
>  - phase6c moves disconnected inodes to lost_found, which again needs
>    the backing buffer to actually do anything.
>  - phase7 then only touches inodes for which we need to reset i_nlink,
>    which always involves reading, modifying and writing the physical
>    inode.
>    which always involves modifying the . and .. entries.
> 
> Given these facts stop caching the inodes to reduce memory usage
> especially in xfs_repair, where this makes a difference for large inode
> count inodes.  On the upper end this allows repair to complete for
> filesystem / amount of memory combinations that previously wouldn't.
> 
> With this we probably could increase the memory available to the buffer
> cache in xfs_repair, but trying to do so I got a bit lost - the current
> formula seems too magic to me to make any sense, and simply doubling the
> buffer cache size causes us to run out of memory given that the data cached
> in the buffer cache (typically lots of 8k inode buffers and few 4k other
> metadata buffers) are much bigger than the inodes cached in the inode
> cache.  We probably need a sizing scheme that takes the actual amount
> of memory allocated to the buffer cache into account to solve this better.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> 
> ---
>  include/libxfs.h      |    5 --
>  libxfs/init.c         |    9 -----
>  libxfs/rdwr.c         |   87 +++++++++++---------------------------------------
>  man/man8/xfs_repair.8 |    6 ---
>  mkfs/xfs_mkfs.c       |    1 
>  repair/xfs_repair.c   |   14 +-------
>  6 files changed, 23 insertions(+), 99 deletions(-)
> 
> Index: xfsprogs/include/libxfs.h
> ===================================================================
> --- xfsprogs.orig/include/libxfs.h	2013-10-09 12:36:31.000000000 +0000
> +++ xfsprogs/include/libxfs.h	2013-10-09 12:40:20.000000000 +0000
> @@ -257,7 +257,6 @@
>  #define LIBXFS_MOUNT_COMPAT_ATTR	0x0010
>  #define LIBXFS_MOUNT_ATTR2		0x0020
>  
> -#define LIBXFS_IHASHSIZE(sbp)		(1<<10)
>  #define LIBXFS_BHASHSIZE(sbp) 		(1<<10)
>  
>  extern xfs_mount_t	*libxfs_mount (xfs_mount_t *, xfs_sb_t *,
> @@ -440,7 +439,6 @@
>  extern int	libxfs_readbufr(struct xfs_buftarg *, xfs_daddr_t, xfs_buf_t *, int, int);
>  
>  extern int libxfs_bhash_size;
> -extern int libxfs_ihash_size;
>  
>  #define LIBXFS_BREAD	0x1
>  #define LIBXFS_BWRITE	0x2
> @@ -640,9 +638,6 @@
>  extern int	libxfs_iflush_int (xfs_inode_t *, xfs_buf_t *);
>  
>  /* Inode Cache Interfaces */
> -extern struct cache	*libxfs_icache;
> -extern struct cache_operations	libxfs_icache_operations;
> -extern void	libxfs_icache_purge (void);
>  extern int	libxfs_iget (xfs_mount_t *, xfs_trans_t *, xfs_ino_t,
>  				uint, xfs_inode_t **, xfs_daddr_t);
>  extern void	libxfs_iput (xfs_inode_t *, uint);
> Index: xfsprogs/libxfs/init.c
> ===================================================================
> --- xfsprogs.orig/libxfs/init.c	2013-10-09 12:36:31.000000000 +0000
> +++ xfsprogs/libxfs/init.c	2013-10-09 12:40:20.000000000 +0000
> @@ -22,9 +22,6 @@
>  
>  char *progname = "libxfs";	/* default, changed by each tool */
>  
> -struct cache *libxfs_icache;	/* global inode cache */
> -int libxfs_ihash_size;		/* #buckets in icache */
> -
>  struct cache *libxfs_bcache;	/* global buffer cache */
>  int libxfs_bhash_size;		/* #buckets in bcache */
>  
> @@ -335,9 +332,6 @@
>  	}
>  	if (needcd)
>  		chdir(curdir);
> -	if (!libxfs_ihash_size)
> -		libxfs_ihash_size = LIBXFS_IHASHSIZE(sbp);
> -	libxfs_icache = cache_init(libxfs_ihash_size, &libxfs_icache_operations);
>  	if (!libxfs_bhash_size)
>  		libxfs_bhash_size = LIBXFS_BHASHSIZE(sbp);
>  	libxfs_bcache = cache_init(libxfs_bhash_size, &libxfs_bcache_operations);
> @@ -866,7 +860,6 @@
>  	int			agno;
>  
>  	libxfs_rtmount_destroy(mp);
> -	libxfs_icache_purge();
>  	libxfs_bcache_purge();
>  
>  	for (agno = 0; agno < mp->m_maxagi; agno++) {
> @@ -882,7 +875,6 @@
>  libxfs_destroy(void)
>  {
>  	manage_zones(1);
> -	cache_destroy(libxfs_icache);
>  	cache_destroy(libxfs_bcache);
>  }
>  
> @@ -898,7 +890,6 @@
>  	time_t t;
>  	char *c;
>  
> -	cache_report(fp, "libxfs_icache", libxfs_icache);
>  	cache_report(fp, "libxfs_bcache", libxfs_bcache);
>  
>  	t = time(NULL);
> Index: xfsprogs/libxfs/rdwr.c
> ===================================================================
> --- xfsprogs.orig/libxfs/rdwr.c	2013-10-09 12:36:31.000000000 +0000
> +++ xfsprogs/libxfs/rdwr.c	2013-10-09 12:46:09.000000000 +0000
> @@ -993,26 +993,12 @@
>  
>  
>  /*
> - * Inode cache interfaces
> + * Inode cache stubs.
>   */
>  
>  extern kmem_zone_t	*xfs_ili_zone;
>  extern kmem_zone_t	*xfs_inode_zone;
>  
> -static unsigned int
> -libxfs_ihash(cache_key_t key, unsigned int hashsize)
> -{
> -	return ((unsigned int)*(xfs_ino_t *)key) % hashsize;
> -}
> -
> -static int
> -libxfs_icompare(struct cache_node *node, cache_key_t key)
> -{
> -	xfs_inode_t	*ip = (xfs_inode_t *)node;
> -
> -	return (ip->i_ino == *(xfs_ino_t *)key);
> -}
> -
>  int
>  libxfs_iget(xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, uint lock_flags,
>  		xfs_inode_t **ipp, xfs_daddr_t bno)
> @@ -1020,34 +1006,21 @@
>  	xfs_inode_t	*ip;
>  	int		error = 0;
>  
> -	if (cache_node_get(libxfs_icache, &ino, (struct cache_node **)&ip)) {
> -#ifdef INO_DEBUG
> -		fprintf(stderr, "%s: allocated inode, ino=%llu(%llu), %p\n",
> -			__FUNCTION__, (unsigned long long)ino, bno, ip);
> -#endif
> -		ip->i_ino = ino;
> -		ip->i_mount = mp;
> -		error = xfs_iread(mp, tp, ip, bno);
> -		if (error) {
> -			cache_node_purge(libxfs_icache, &ino,
> -					(struct cache_node *)ip);
> -			ip = NULL;
> -		}
> +	ip = kmem_zone_zalloc(xfs_inode_zone, 0);
> +	if (!ip)
> +		return ENOMEM;
> +
> +	ip->i_ino = ino;
> +	ip->i_mount = mp;
> +	error = xfs_iread(mp, tp, ip, bno);
> +	if (error) {
> +		kmem_zone_free(xfs_inode_zone, ip);
> +		*ipp = NULL;
> +		return error;
>  	}
> -	*ipp = ip;
> -	return error;
> -}
> -
> -void
> -libxfs_iput(xfs_inode_t *ip, uint lock_flags)
> -{
> -	cache_node_put(libxfs_icache, (struct cache_node *)ip);
> -}
>  
> -static struct cache_node *
> -libxfs_ialloc(cache_key_t key)
> -{
> -	return kmem_zone_zalloc(xfs_inode_zone, 0);
> +	*ipp = ip;
> +	return 0;
>  }
>  
>  static void
> @@ -1064,32 +1037,12 @@
>  		libxfs_idestroy_fork(ip, XFS_ATTR_FORK);
>  }
>  
> -static void
> -libxfs_irelse(struct cache_node *node)
> -{
> -	xfs_inode_t	*ip = (xfs_inode_t *)node;
> -
> -	if (ip != NULL) {
> -		if (ip->i_itemp)
> -			kmem_zone_free(xfs_ili_zone, ip->i_itemp);
> -		ip->i_itemp = NULL;
> -		libxfs_idestroy(ip);
> -		kmem_zone_free(xfs_inode_zone, ip);
> -		ip = NULL;
> -	}
> -}
> -
>  void
> -libxfs_icache_purge(void)
> +libxfs_iput(xfs_inode_t *ip, uint lock_flags)
>  {
> -	cache_purge(libxfs_icache);
> +	if (ip->i_itemp)
> +		kmem_zone_free(xfs_ili_zone, ip->i_itemp);
> +	ip->i_itemp = NULL;
> +	libxfs_idestroy(ip);
> +	kmem_zone_free(xfs_inode_zone, ip);
>  }
> -
> -struct cache_operations libxfs_icache_operations = {
> -	/* .hash */	libxfs_ihash,
> -	/* .alloc */	libxfs_ialloc,
> -	/* .flush */	NULL,
> -	/* .relse */	libxfs_irelse,
> -	/* .compare */	libxfs_icompare,
> -	/* .bulkrelse */ NULL
> -};
> Index: xfsprogs/mkfs/xfs_mkfs.c
> ===================================================================
> --- xfsprogs.orig/mkfs/xfs_mkfs.c	2013-10-09 12:36:31.000000000 +0000
> +++ xfsprogs/mkfs/xfs_mkfs.c	2013-10-09 12:40:20.000000000 +0000
> @@ -2909,7 +2909,6 @@
>  	 * Need to drop references to inodes we still hold, first.
>  	 */
>  	libxfs_rtmount_destroy(mp);
> -	libxfs_icache_purge();
>  	libxfs_bcache_purge();
>  
>  	/*
> Index: xfsprogs/repair/xfs_repair.c
> ===================================================================
> --- xfsprogs.orig/repair/xfs_repair.c	2013-09-09 14:34:49.000000000 +0000
> +++ xfsprogs/repair/xfs_repair.c	2013-10-09 12:40:20.000000000 +0000
> @@ -69,7 +69,6 @@
>  };
>  
>  
> -static int	ihash_option_used;
>  static int	bhash_option_used;
>  static long	max_mem_specified;	/* in megabytes */
>  static int	phase2_threads = 32;
> @@ -239,13 +238,13 @@
>  					pre_65_beta = 1;
>  					break;
>  				case IHASH_SIZE:
> -					libxfs_ihash_size = (int)strtol(val, NULL, 0);
> -					ihash_option_used = 1;
> +					do_warn(
> +		_("-o ihash option has been removed and will be ignored\n"));
>  					break;
>  				case BHASH_SIZE:
>  					if (max_mem_specified)
>  						do_abort(
> -			_("-o bhash option cannot be used with -m option\n"));
> +		_("-o bhash option cannot be used with -m option\n"));
>  					libxfs_bhash_size = (int)strtol(val, NULL, 0);
>  					bhash_option_used = 1;
>  					break;
> @@ -648,9 +647,7 @@
>  		unsigned long	max_mem;
>  		struct rlimit	rlim;
>  
> -		libxfs_icache_purge();
>  		libxfs_bcache_purge();
> -		cache_destroy(libxfs_icache);
>  		cache_destroy(libxfs_bcache);
>  
>  		mem_used = (mp->m_sb.sb_icount >> (10 - 2)) +
> @@ -709,11 +706,6 @@
>  			do_log(_("        - block cache size set to %d entries\n"),
>  				libxfs_bhash_size * HASH_CACHE_RATIO);
>  
> -		if (!ihash_option_used)
> -			libxfs_ihash_size = libxfs_bhash_size;
> -
> -		libxfs_icache = cache_init(libxfs_ihash_size,
> -						&libxfs_icache_operations);
>  		libxfs_bcache = cache_init(libxfs_bhash_size,
>  						&libxfs_bcache_operations);
>  	}
> Index: xfsprogs/man/man8/xfs_repair.8
> ===================================================================
> --- xfsprogs.orig/man/man8/xfs_repair.8	2013-09-09 14:34:49.000000000 +0000
> +++ xfsprogs/man/man8/xfs_repair.8	2013-10-09 12:40:20.000000000 +0000
> @@ -130,12 +130,6 @@
>  supported are:
>  .RS 1.0i
>  .TP
> -.BI ihash= ihashsize
> -overrides the default inode cache hash size. The total number of
> -inode cache entries are limited to 8 times this amount. The default
> -.I ihashsize
> -is 1024 (for a total of 8192 entries).
> -.TP
>  .BI bhash= bhashsize
>  overrides the default buffer cache hash size. The total number of
>  buffer cache entries are limited to 8 times this amount. The default
> 
> _______________________________________________
> xfs mailing list
> xfs@oss.sgi.com
> http://oss.sgi.com/mailman/listinfo/xfs
---end quoted text---

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2013-10-31 15:43 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-10-09 13:02 [PATCH] libxfs: stop caching inode structures Christoph Hellwig
2013-10-14 20:16 ` Dave Chinner
2013-10-15 16:06   ` Christoph Hellwig
2013-10-31 15:43 ` Christoph Hellwig
  -- strict thread matches above, loose matches on Subject: below --
2012-02-07 18:22 Christoph Hellwig
2012-02-08  5:11 ` Dave Chinner
2012-02-09 18:01   ` Christoph Hellwig

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox