From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
netdev@vger.kernel.org, trond.myklebust@fy
Subject: [PATCH 09/32] mm: kmem_alloc_estimate()
Date: Thu, 02 Oct 2008 15:05:13 +0200
Message-ID: <20081002131608.162932309@chello.nl>
In-Reply-To: <20081002130504.927878499@chello.nl>
Provide a method to get the upper bound on the pages needed to allocate
a given number of objects from a given kmem_cache.
This lays the foundation for a generic reserve framework as presented in
a later patch in this series. This framework needs to convert object demand
(kmalloc() bytes, kmem_cache_alloc() objects) to pages.
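
Purely as an illustration of the intended use (not part of this patch;
all names and numbers below are made up), a subsystem sizing such a
reserve might combine the new helpers like this:

	static struct kmem_cache *my_cachep;	/* hypothetical cache */
	static unsigned long my_reserve_pages;

	static void my_reserve_init(void)
	{
		unsigned long pages = 0;

		/* worst case pages for 256 objects from my_cachep */
		pages += kmem_alloc_estimate(my_cachep, GFP_ATOMIC, 256);

		/* worst case pages for 16 kmalloc()s of up to 1024 bytes */
		pages += kmalloc_estimate_objs(1024, GFP_ATOMIC, 16);

		/* worst case pages for 64KB of variable-size kmalloc() data */
		pages += kmalloc_estimate_bytes(GFP_ATOMIC, 65536);

		my_reserve_pages = pages;
	}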
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
include/linux/slab.h | 4 ++
mm/slab.c | 75 +++++++++++++++++++++++++++++++++++++++++++
mm/slob.c | 67 +++++++++++++++++++++++++++++++++++++++
mm/slub.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 233 insertions(+)
Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h
+++ linux-2.6/include/linux/slab.h
@@ -72,6 +72,8 @@ void kmem_cache_free(struct kmem_cache *
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
+unsigned kmem_alloc_estimate(struct kmem_cache *cachep,
+ gfp_t flags, int objects);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -107,6 +109,8 @@ void * __must_check __krealloc(const voi
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);
+unsigned kmalloc_estimate_objs(size_t, gfp_t, int);
+unsigned kmalloc_estimate_bytes(gfp_t, size_t);
/*
* Function prototypes passed to kmem_cache_defrag() to enable defragmentation
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c
+++ linux-2.6/mm/slub.c
@@ -2452,6 +2452,42 @@ const char *kmem_cache_name(struct kmem_
}
EXPORT_SYMBOL(kmem_cache_name);
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @objects objects from @cachep.
+ *
+ * We use s->min because those slabs pack the fewest objects and are
+ * therefore the least space efficient.
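+ *
+ * Worked example with illustrative numbers only: if the minimal
+ * configuration packs 8 objects into an order-0 slab, then 100
+ * objects need DIV_ROUND_UP(100, 8) << 0 == 13 pages, plus the
+ * per-cpu overhead accounted below.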
+ */
+unsigned kmem_alloc_estimate(struct kmem_cache *s, gfp_t flags, int objects)
+{
+ unsigned long pages;
+ struct kmem_cache_order_objects x;
+
+ if (WARN_ON(!s) || WARN_ON(!oo_objects(s->min)))
+ return 0;
+
+ x = s->min;
+ pages = DIV_ROUND_UP(objects, oo_objects(x)) << oo_order(x);
+
+ /*
+ * Account for the possible additional overhead if the slab holds more
+ * than one object. Use s->oo because that is the largest configuration
+ * and hence the worst case.
+ */
+ x = s->oo;
+ if (oo_objects(x) > 1) {
+ /*
+ * Account for the possible additional overhead if the per-cpu
+ * slabs are currently empty and have to be allocated. This is
+ * very unlikely, but a possible scenario immediately after
+ * kmem_cache_shrink().
+ */
+ pages += num_possible_cpus() << oo_order(x);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL_GPL(kmem_alloc_estimate);
+
static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
@@ -2852,6 +2888,57 @@ void kfree(const void *x)
EXPORT_SYMBOL(kfree);
/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @count objects of @size bytes from kmalloc given @flags.
+ */
+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count)
+{
+ struct kmem_cache *s = get_slab(size, flags);
+ if (!s)
+ return 0;
+
+ return kmem_alloc_estimate(s, flags, count);
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs);
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate @bytes
+ * from kmalloc in an unspecified number of allocations of nonuniform size.
+ */
+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes)
+{
+ int i;
+ unsigned long pages;
+
+ /*
+ * Multiply by two to account for the worst-case slack space due to
+ * the power-of-two allocation sizes, e.g. a 65-byte request being
+ * served from the 128-byte cache.
+ */
+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+ /*
+ * add the kmem_cache overhead of each possible kmalloc cache
+ */
+ for (i = 1; i < PAGE_SHIFT; i++) {
+ struct kmem_cache *s;
+
+#ifdef CONFIG_ZONE_DMA
+ if (unlikely(flags & SLUB_DMA))
+ s = dma_kmalloc_cache(i, flags);
+ else
+#endif
+ s = &kmalloc_caches[i];
+
+ if (s)
+ pages += kmem_alloc_estimate(s, flags, 0);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes);
+
+/*
* Allocate a slab scratch space that is sufficient to keep at least
* max_defrag_slab_objects pointers to individual objects and also a bitmap
* for max_defrag_slab_objects.
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c
+++ linux-2.6/mm/slab.c
@@ -3849,6 +3849,81 @@ const char *kmem_cache_name(struct kmem_
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @objects objects from @cachep.
+ */
+unsigned kmem_alloc_estimate(struct kmem_cache *cachep,
+ gfp_t flags, int objects)
+{
+ /*
+ * (1) memory for objects,
+ */
+ unsigned nr_slabs = DIV_ROUND_UP(objects, cachep->num);
+ unsigned nr_pages = nr_slabs << cachep->gfporder;
+
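+ /*
+ * Worked example with illustrative numbers only: 100 objects from a
+ * cache packing 30 objects per order-0 slab gives nr_slabs == 4 and
+ * nr_pages == 4, before the fixed overhead added below.
+ */
+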
+ /*
+ * (2) memory for each per-cpu queue (nr_cpu_ids),
+ * (3) memory for each per-node alien queues (nr_cpu_ids), and
+ * (4) some amount of memory for the slab management structures
+ *
+ * XXX: truly account for these
+ */
+ nr_pages += 1 + ilog2(nr_pages);
+
+ return nr_pages;
+}
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @count objects of @size bytes from kmalloc given @flags.
+ */
+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count)
+{
+ struct kmem_cache *s = kmem_find_general_cachep(size, flags);
+ if (!s)
+ return 0;
+
+ return kmem_alloc_estimate(s, flags, count);
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs);
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate @bytes
+ * from kmalloc in an unspecified number of allocations of nonuniform size.
+ */
+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes)
+{
+ unsigned long pages;
+ struct cache_sizes *csizep = malloc_sizes;
+
+ /*
+ * Multiply by two to account for the worst-case slack space due to
+ * the power-of-two allocation sizes.
+ */
+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+ /*
+ * add the kmem_cache overhead of each possible kmalloc cache
+ */
+ for (csizep = malloc_sizes; csizep->cs_cachep; csizep++) {
+ struct kmem_cache *s;
+
+#ifdef CONFIG_ZONE_DMA
+ if (unlikely(flags & __GFP_DMA))
+ s = csizep->cs_dmacachep;
+ else
+#endif
+ s = csizep->cs_cachep;
+
+ if (s)
+ pages += kmem_alloc_estimate(s, flags, 0);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes);
+
+/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/
static int alloc_kmemlist(struct kmem_cache *cachep)
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c
+++ linux-2.6/mm/slob.c
@@ -682,3 +682,70 @@ void __init kmem_cache_init(void)
{
slob_ready = 1;
}
+
+static unsigned __slob_estimate(unsigned size, unsigned align, unsigned objects)
+{
+ unsigned nr_pages;
+
+ /* round up to SLOB granularity, including worst-case alignment padding */
+ size = SLOB_UNIT * SLOB_UNITS(size + align - 1);
+
+ if (size <= PAGE_SIZE) {
+ nr_pages = DIV_ROUND_UP(objects, PAGE_SIZE / size);
+ } else {
+ nr_pages = objects << get_order(size);
+ }
+
+ return nr_pages;
+}
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @objects objects from @cachep.
+ */
+unsigned kmem_alloc_estimate(struct kmem_cache *c, gfp_t flags, int objects)
+{
+ unsigned size = c->size;
+
+ if (c->flags & SLAB_DESTROY_BY_RCU)
+ size += sizeof(struct slob_rcu);
+
+ return __slob_estimate(size, c->align, objects);
+}
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @count objects of @size bytes from kmalloc given @flags.
+ */
+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count)
+{
+ unsigned align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+
+ return __slob_estimate(size, align, count);
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs);
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate @bytes
+ * from kmalloc in an unspecified number of allocations of nonuniform size.
+ */
+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes)
+{
+ unsigned long pages;
+
+ /*
+ * Multiply by two to account for the worst-case slack space due to
+ * the power-of-two allocation sizes.
+ *
+ * While SLOB does not use power-of-two size classes, it cannot do
+ * worse than that for sequential allocations.
+ */
+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+ /*
+ * Our power of two series starts at PAGE_SIZE, so add one page.
+ */
+ pages++;
+
+ return pages;
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes);