linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Liam Howlett <liam.howlett@oracle.com>
To: "maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Hugh Dickins <hughd@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Subject: [PATCH v12 05/69] radix tree test suite: add support for slab bulk APIs
Date: Wed, 20 Jul 2022 02:17:40 +0000	[thread overview]
Message-ID: <20220720021727.17018-6-Liam.Howlett@oracle.com> (raw)
In-Reply-To: <20220720021727.17018-1-Liam.Howlett@oracle.com>

From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the
radix tree test suite.

Link: https://lkml.kernel.org/r/20220504010716.661115-7-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20220621204632.3370049-6-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 tools/include/linux/slab.h       |   4 ++
 tools/testing/radix-tree/linux.c | 118 ++++++++++++++++++++++++++++++-
 2 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index 0616409513eb..311759ea25e9 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -41,4 +41,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
 			unsigned int align, unsigned int flags,
 			void (*ctor)(void *));
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+			  void **list);
+
 #endif		/* _TOOLS_SLAB_H */
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index f20529ae4dbe..2048d12c31df 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -93,14 +93,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 	return p;
 }
 
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
 	uatomic_dec(&nr_allocated);
 	uatomic_dec(&cachep->nr_allocated);
 	if (kmalloc_verbose)
 		printf("Freeing %p to slab\n", objp);
-	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs > 10 || cachep->align) {
 		memset(objp, POISON_FREE, cachep->size);
 		free(objp);
@@ -110,9 +109,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 		node->parent = cachep->objs;
 		cachep->objs = node;
 	}
+}
+
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+{
+	pthread_mutex_lock(&cachep->lock);
+	kmem_cache_free_locked(cachep, objp);
 	pthread_mutex_unlock(&cachep->lock);
 }
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+{
+	if (kmalloc_verbose)
+		pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);
+
+	pthread_mutex_lock(&cachep->lock);
+	for (size_t i = 0; i < size; i++)
+		kmem_cache_free_locked(cachep, list[i]);
+	pthread_mutex_unlock(&cachep->lock);
+}
+
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+			  void **p)
+{
+	size_t i;
+
+	if (kmalloc_verbose)
+		pr_debug("Bulk alloc %zu\n", size);
+
+	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+		if (cachep->non_kernel < size)
+			return 0;
+
+		cachep->non_kernel -= size;
+	}
+
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs >= size) {
+		struct radix_tree_node *node;
+
+		for (i = 0; i < size; i++) {
+			node = cachep->objs;
+			cachep->nr_objs--;
+			cachep->objs = node->parent;
+			p[i] = node;
+			node->parent = NULL;
+		}
+		pthread_mutex_unlock(&cachep->lock);
+	} else {
+		pthread_mutex_unlock(&cachep->lock);
+		for (i = 0; i < size; i++) {
+			if (cachep->align) {
+				posix_memalign(&p[i], cachep->align,
+					       cachep->size);
+			} else {
+				p[i] = malloc(cachep->size);
+			}
+			if (cachep->ctor)
+				cachep->ctor(p[i]);
+			else if (gfp & __GFP_ZERO)
+				memset(p[i], 0, cachep->size);
+		}
+	}
+
+	for (i = 0; i < size; i++) {
+		uatomic_inc(&nr_allocated);
+		uatomic_inc(&cachep->nr_allocated);
+		uatomic_inc(&cachep->nr_tallocated);
+		if (kmalloc_verbose)
+			printf("Allocating %p from slab\n", p[i]);
+	}
+
+	return size;
+}
+
 struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		unsigned int flags, void (*ctor)(void *))
@@ -130,3 +200,47 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 	ret->non_kernel = 0;
 	return ret;
 }
+
+/*
+ * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
+ */
+void test_kmem_cache_bulk(void)
+{
+	int i;
+	void *list[12];
+	static struct kmem_cache *test_cache, *test_cache2;
+
+	/*
+	 * Testing the bulk allocators without aligned kmem_cache to force the
+	 * bulk alloc/free to reuse freed objects from the cache's free list.
+	 */
+	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
+
+	for (i = 0; i < 5; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 5; i++)
+		kmem_cache_free(test_cache, list[i]);
+	assert(test_cache->nr_objs == 5);
+
+	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
+	kmem_cache_free_bulk(test_cache, 5, list);
+
+	for (i = 0; i < 12 ; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 12; i++)
+		kmem_cache_free(test_cache, list[i]);
+
+	/* The last free will not be kept around */
+	assert(test_cache->nr_objs == 11);
+
+	/* Aligned caches will immediately free */
+	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
+
+	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
+	kmem_cache_free_bulk(test_cache2, 10, list);
+	assert(!test_cache2->nr_objs);
+
+
+}
-- 
2.35.1


  parent reply	other threads:[~2022-07-20  2:17 UTC|newest]

Thread overview: 80+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-20  2:17 [PATCH v12 00/69] Introducing the Maple Tree Liam Howlett
2022-07-20  2:17 ` [PATCH v12 01/69] Maple Tree: add new data structure Liam Howlett
2022-07-20  2:17 ` [PATCH v12 02/69] radix tree test suite: add pr_err define Liam Howlett
2022-07-20  2:17 ` [PATCH v12 03/69] radix tree test suite: add kmem_cache_set_non_kernel() Liam Howlett
2022-07-20  2:17 ` Liam Howlett [this message]
2022-07-20  2:17 ` [PATCH v12 04/69] radix tree test suite: add allocation counts and size to kmem_cache Liam Howlett
2022-07-20  2:17 ` [PATCH v12 06/69] radix tree test suite: add lockdep_is_held to header Liam Howlett
2022-07-20  2:17 ` [PATCH v12 08/69] mm: start tracking VMAs with maple tree Liam Howlett
2022-07-27  0:28   ` Nathan Chancellor
2022-07-28  0:34     ` Liam Howlett
2022-07-29 15:41       ` Liam Howlett
2022-07-29 17:02         ` Nathan Chancellor
2022-07-29 20:13           ` Liam Howlett
2022-07-20  2:17 ` [PATCH v12 07/69] lib/test_maple_tree: add testing for " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 10/69] mmap: use the VMA iterator in count_vma_pages_range() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 09/69] mm: add VMA iterator Liam Howlett
2022-07-20  2:17 ` [PATCH v12 11/69] mm/mmap: use the maple tree in find_vma() instead of the rbtree Liam Howlett
2022-07-20  2:17 ` [PATCH v12 13/69] mm/mmap: use maple tree for unmapped_area{_topdown} Liam Howlett
2022-07-20  2:17 ` [PATCH v12 12/69] mm/mmap: use the maple tree for find_vma_prev() instead of the rbtree Liam Howlett
2022-07-20  2:17 ` [PATCH v12 15/69] damon: convert __damon_va_three_regions to use the VMA iterator Liam Howlett
2022-07-20  2:17 ` [PATCH v12 16/69] proc: remove VMA rbtree use from nommu Liam Howlett
2022-07-20  2:17 ` [PATCH v12 14/69] kernel/fork: use maple tree for dup_mmap() during forking Liam Howlett
2022-07-20  2:17 ` [PATCH v12 19/69] xen: use vma_lookup() in privcmd_ioctl_mmap() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 17/69] mm: remove rb tree Liam Howlett
2022-07-20  2:17 ` [PATCH v12 18/69] mmap: change zeroing of maple tree in __vma_adjust() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 20/69] mm: optimize find_exact_vma() to use vma_lookup() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 21/69] mm/khugepaged: optimize collapse_pte_mapped_thp() by using vma_lookup() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 22/69] mm/mmap: change do_brk_flags() to expand existing VMA and add do_brk_munmap() Liam Howlett
     [not found]   ` <f5e99dbb-7008-dcf8-3e85-2f161056b37a@gmail.com>
2022-07-25 14:01     ` Liam Howlett
2022-07-25 18:49       ` Liam Howlett
     [not found]         ` <8d77a517-fcc1-5e74-b9d8-2a250dc751ed@gmail.com>
2022-07-28  0:57           ` Liam Howlett
2022-07-20  2:17 ` [PATCH v12 24/69] mm/mmap: use advanced maple tree API for mmap_region() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 23/69] mm: use maple tree operations for find_vma_intersection() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 25/69] mm: remove vmacache Liam Howlett
2022-07-20  2:17 ` [PATCH v12 26/69] mm: convert vma_lookup() to use mtree_load() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 27/69] mm/mmap: move mmap_region() below do_munmap() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 29/69] mm/mmap: change do_brk_munmap() to use do_mas_align_munmap() Liam Howlett
2022-07-20  2:17 ` [PATCH v12 28/69] mm/mmap: reorganize munmap to use maple states Liam Howlett
2022-07-20  2:17 ` [PATCH v12 30/69] arm64: remove mmap linked list from vdso Liam Howlett
2022-07-20  2:17 ` [PATCH v12 31/69] arm64: Change elfcore for_each_mte_vma() to use VMA iterator Liam Howlett
2022-07-20  2:17 ` [PATCH v12 32/69] parisc: remove mmap linked list from cache handling Liam Howlett
2022-07-20  2:17 ` [PATCH v12 33/69] powerpc: remove mmap linked list walks Liam Howlett
2022-08-02 10:36   ` Christophe Leroy
2022-07-20  2:17 ` [PATCH v12 34/69] s390: remove vma " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 35/69] x86: " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 36/69] xtensa: " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 37/69] cxl: remove vma linked list walk Liam Howlett
2022-07-20  2:17 ` [PATCH v12 38/69] optee: " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 40/69] coredump: " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 39/69] um: " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 44/69] userfaultfd: use maple tree iterator to iterate VMAs Liam Howlett
2022-07-20  2:17 ` [PATCH v12 41/69] exec: use VMA iterator instead of linked list Liam Howlett
2022-07-20  2:17 ` [PATCH v12 42/69] fs/proc/base: use maple tree iterators in place " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 43/69] fs/proc/task_mmu: stop using linked list and highest_vm_end Liam Howlett
2022-07-20  2:17 ` [PATCH v12 46/69] acct: use VMA iterator instead of linked list Liam Howlett
2022-07-20  2:17 ` [PATCH v12 45/69] ipc/shm: " Liam Howlett
2022-07-20  2:17 ` [PATCH v12 47/69] perf: use VMA iterator Liam Howlett
2022-07-20  2:17 ` [PATCH v12 51/69] mm/gup: use maple tree navigation instead of linked list Liam Howlett
2022-07-20  2:17 ` [PATCH v12 48/69] sched: use maple tree iterator to walk VMAs Liam Howlett
2022-07-20  2:17 ` [PATCH v12 49/69] fork: use VMA iterator Liam Howlett
2022-07-20  2:17 ` [PATCH v12 50/69] bpf: remove VMA linked list Liam Howlett
2022-07-20  2:18 ` [PATCH v12 53/69] mm/ksm: use vma iterators instead of vma " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 54/69] mm/madvise: use vma_find() " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 52/69] mm/khugepaged: stop using " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 56/69] mm/mempolicy: use vma iterator & maple state instead of " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 57/69] mm/mlock: use vma iterator and " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 58/69] mm/mprotect: use maple tree navigation " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 55/69] mm/memcontrol: stop using mm->highest_vm_end Liam Howlett
2022-07-20  2:18 ` [PATCH v12 62/69] mm/pagewalk: use vma_find() instead of vma linked list Liam Howlett
2022-07-20  2:18 ` [PATCH v12 59/69] mm/mremap: use vma_find_intersection() " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 61/69] mm/oom_kill: use maple tree iterators " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 60/69] mm/msync: use vma_find() " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 63/69] mm/swapfile: use vma iterator " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 65/69] nommu: remove uses of VMA " Liam Howlett
2022-07-20  2:18 ` [PATCH v12 64/69] i915: use the VMA iterator Liam Howlett
2022-07-20  2:18 ` [PATCH v12 68/69] mm/mmap: drop range_has_overlap() function Liam Howlett
2022-07-20  2:18 ` [PATCH v12 67/69] mm: remove the vma linked list Liam Howlett
2022-07-20  2:18 ` [PATCH v12 66/69] riscv: use vma iterator for vdso Liam Howlett
2022-07-20  2:18 ` [PATCH v12 69/69] mm/mmap.c: pass in mapping to __vma_link_file() Liam Howlett
2022-07-20  5:09 ` [PATCH v12 00/69] Introducing the Maple Tree Andrew Morton

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220720021727.17018-6-Liam.Howlett@oracle.com \
    --to=liam.howlett@oracle.com \
    --cc=akpm@linux-foundation.org \
    --cc=hughd@google.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=maple-tree@lists.infradead.org \
    --cc=yuzhao@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).