[PATCH 04/11] Allow pooled nodes to be recycled back onto a free list
From: Shawn O. Pearce @ 2007-11-09 11:06 UTC
  To: Junio C Hamano; +Cc: git

In some cases a caller may use one of our allocated nodes only
temporarily and then want to return it to the allocator.  We now
define a function that threads the object onto the front of a free
list; that list is drawn from only after the current block has been
exhausted.
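
The list costs no extra storage: a node on the free list is by
definition unused, so its own first sizeof(void *) bytes can hold
the link to the next free node.  A minimal standalone sketch of the
idea (illustrative names, not the macro-generated functions):

    #include <stddef.h>
    #include <string.h>

    static void *free_list;

    /* Thread a no-longer-needed node onto the front of the list;
     * the node's own first bytes become the link. */
    static void recycle_node(void *n)
    {
            *((void **)n) = free_list;
            free_list = n;
    }

    /* Pop the head of the list, or return NULL if it is empty;
     * re-zero the node so it looks freshly allocated. */
    static void *pop_node(size_t sz)
    {
            void *ret = free_list;
            if (ret) {
                    free_list = *((void **)ret);
                    memset(ret, 0, sz);
            }
            return ret;
    }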

We hold off on looking at the free list until the current block is
exhausted.  With BLOCKING at 1024, this saves about 1000 tests of
the (usually empty) free list per block.
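
Concretely, the hot path tests only name##_pool.nr; the free list
is examined at most once per block refill instead of once per
allocation.  In outline (illustrative names again; grow_block() is
a hypothetical stand-in for the xmalloc of a fresh block):

    void *alloc_node_sketch(struct node_pool *p, size_t sz)
    {
            void *ret;

            if (!p->nr) {                   /* rare: block exhausted */
                    if (p->free_list) {     /* one test per BLOCKING allocations */
                            ret = p->free_list;
                            p->free_list = *((void **)ret);
                            memset(ret, 0, sz);
                            return ret;
                    }
                    grow_block(p);          /* hypothetical block refill */
            }
            p->nr--;                        /* common case: carve from block */
            ret = p->base;
            p->base += sz;
            memset(ret, 0, sz);
            return ret;
    }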

Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
---
 alloc.c |   23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/alloc.c b/alloc.c
index aa429ec..5737a73 100644
--- a/alloc.c
+++ b/alloc.c
@@ -26,6 +26,7 @@ struct node_block
 struct node_pool
 {
 	struct node_block *block_list;
+	void *free_list;
 	char *base;
 	size_t nr;
 };
@@ -38,15 +39,22 @@ struct node_pool
 
 static void report(const char* name, struct node_pool *p, size_t sz)
 {
-	unsigned int count = 0;
+	unsigned int count = 0, free = 0;
 	struct node_block *b;
+	void **f;
 
 	for (b = p->block_list; b; b = b->next)
 		count += BLOCKING;
+	for (f = p->free_list; f; f = *f)
+		free++;
+	free += p->nr;
 	count -= p->nr;
 
 	sz = (count * sz) >> 10;
-	fprintf(stderr, "%10s: %8u (" SZ_FMT " kB)\n", name, count, sz);
+	fprintf(stderr, "%10s: %8u (" SZ_FMT " kB)", name, count, sz);
+	if (free)
+		fprintf(stderr, ", %8u on free list", free);
+	fputc('\n', stderr);
 }
 
 #undef SZ_FMT
@@ -59,6 +67,12 @@ void *alloc_##name##_node(void)					\
 								\
 	if (!name##_pool.nr) {					\
 		struct node_block *b;				\
+		if (name##_pool.free_list) {			\
+			ret = name##_pool.free_list;		\
+			name##_pool.free_list = *((void**)ret);	\
+			memset(ret, 0, sizeof(t));		\
+			return ret;				\
+		}						\
 		b = xmalloc(sizeof(*b) + BLOCKING * sizeof(t));	\
 		b->next = name##_pool.block_list;		\
 		name##_pool.block_list = b;			\
@@ -71,6 +85,11 @@ void *alloc_##name##_node(void)					\
 	memset(ret, 0, sizeof(t));				\
 	return ret;						\
 }								\
+void free_##name##_node(void *n)				\
+{								\
+	*((void**)n) = name##_pool.free_list;			\
+	name##_pool.free_list = n;				\
+}								\
 static void report_##name(void)					\
 {								\
 	report(#name, &name##_pool, sizeof(t));			\
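
With this in place, a caller that uses a node only temporarily can
hand it back for reuse, e.g. (assuming the pool macro is
instantiated for commit, as alloc_commit_node() already is; the
snippet is purely illustrative):

    struct commit *c = alloc_commit_node();
    /* ... temporary use of c ... */
    free_commit_node(c);    /* recycled once the current block runs out */
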
-- 
1.5.3.5.1622.g41d10
