* [PATCH 02/12] tools: Add testing support for changes to slab for sheaves
From: Vlastimil Babka @ 2025-09-01 11:08 UTC
To: Liam R. Howlett, Matthew Wilcox, Lorenzo Stoakes, Jann Horn,
Pedro Falcato, Suren Baghdasaryan
Cc: Harry Yoo, Andrew Morton, maple-tree, linux-mm, linux-fsdevel,
linux-kernel, Vlastimil Babka, Liam R. Howlett
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
The slab changes for sheaves require more effort in the testing code.
Unite all the kmem_cache work into the tools/include slab header for
both the vma and maple tree testing.
The vma test code also requires importing more #defines to allow for
seamless use of the shared kmem_cache code.
This adds the pthread header to the slab header in the tools directory
to allow for the pthread_mutex used in linux.c.
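For illustration only (not part of this patch): a minimal sketch of how a
test-harness caller can use the unified header after this change. The cache
names, object size, alignment and sheaf_capacity values below are made up,
but both call styles resolve through the _Generic() wrapper added to
tools/include/linux/slab.h, so existing callers keep the old signature while
new callers can pass a struct kmem_cache_args:

#include <linux/slab.h>

static struct kmem_cache *legacy_cache, *sheaf_cache;

static void example_cache_setup(void)
{
	struct kmem_cache_args args = {
		.align		= 64,	/* illustrative value */
		.sheaf_capacity	= 32,	/* illustrative value */
	};

	/* Old signature: _Generic() selects __kmem_cache_create(). */
	legacy_cache = kmem_cache_create("legacy", 256, 64, SLAB_PANIC, NULL);

	/* Args signature: _Generic() selects __kmem_cache_create_args(). */
	sheaf_cache = kmem_cache_create("sheaves", 256, &args,
					SLAB_HWCACHE_ALIGN);
}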
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
tools/include/linux/slab.h | 137 ++++++++++++++++++++++++++++++++++++--
tools/testing/shared/linux.c | 26 ++------
tools/testing/shared/maple-shim.c | 1 +
tools/testing/vma/vma_internal.h | 94 +-------------------------
4 files changed, 142 insertions(+), 116 deletions(-)
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index c87051e2b26f5a7fee0362697fae067076b8e84d..c5c5cc6db5668be2cc94c29065ccfa7ca7b4bb08 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -4,11 +4,31 @@
#include <linux/types.h>
#include <linux/gfp.h>
+#include <pthread.h>
-#define SLAB_PANIC 2
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define kzalloc_node(size, flags, node) kmalloc(size, flags)
+enum _slab_flag_bits {
+ _SLAB_KMALLOC,
+ _SLAB_HWCACHE_ALIGN,
+ _SLAB_PANIC,
+ _SLAB_TYPESAFE_BY_RCU,
+ _SLAB_ACCOUNT,
+ _SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr) ((unsigned int __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED ((unsigned int __force)(0U))
+
+#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
+#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
+#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
+#ifdef CONFIG_MEMCG
+# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
+#else
+# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
+#endif
void *kmalloc(size_t size, gfp_t gfp);
void kfree(void *p);
@@ -23,6 +43,86 @@ enum slab_state {
FULL
};
+struct kmem_cache {
+ pthread_mutex_t lock;
+ unsigned int size;
+ unsigned int align;
+ unsigned int sheaf_capacity;
+ int nr_objs;
+ void *objs;
+ void (*ctor)(void *);
+ bool non_kernel_enabled;
+ unsigned int non_kernel;
+ unsigned long nr_allocated;
+ unsigned long nr_tallocated;
+ bool exec_callback;
+ void (*callback)(void *);
+ void *private;
+};
+
+struct kmem_cache_args {
+ /**
+ * @align: The required alignment for the objects.
+ *
+ * %0 means no specific alignment is requested.
+ */
+ unsigned int align;
+ /**
+ * @sheaf_capacity: The maximum size of the sheaf.
+ */
+ unsigned int sheaf_capacity;
+ /**
+ * @useroffset: Usercopy region offset.
+ *
+ * %0 is a valid offset, when @usersize is non-%0
+ */
+ unsigned int useroffset;
+ /**
+ * @usersize: Usercopy region size.
+ *
+ * %0 means no usercopy region is specified.
+ */
+ unsigned int usersize;
+ /**
+ * @freeptr_offset: Custom offset for the free pointer
+ * in &SLAB_TYPESAFE_BY_RCU caches
+ *
+ * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+ * outside of the object. This might cause the object to grow in size.
+ * Cache creators that have a reason to avoid this can specify a custom
+ * free pointer offset in their struct where the free pointer will be
+ * placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, %use_freeptr_offset must be set %true.
+ *
+ * Note that @ctor currently isn't supported with custom free pointers
+ * as a @ctor requires an external free pointer.
+ */
+ unsigned int freeptr_offset;
+ /**
+ * @use_freeptr_offset: Whether a @freeptr_offset is used.
+ */
+ bool use_freeptr_offset;
+ /**
+ * @ctor: A constructor for the objects.
+ *
+ * The constructor is invoked for each object in a newly allocated slab
+ * page. It is the cache user's responsibility to free object in the
+ * same state as after calling the constructor, or deal appropriately
+ * with any differences between a freshly constructed and a reallocated
+ * object.
+ *
+ * %NULL means no constructor.
+ */
+ void (*ctor)(void *);
+};
+
static inline void *kzalloc(size_t size, gfp_t gfp)
{
return kmalloc(size, gfp | __GFP_ZERO);
@@ -37,9 +137,38 @@ static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
}
void kmem_cache_free(struct kmem_cache *cachep, void *objp);
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- unsigned int align, unsigned int flags,
- void (*ctor)(void *));
+
+struct kmem_cache *
+__kmem_cache_create_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args, unsigned int flags);
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args, unsigned int flags)
+{
+ struct kmem_cache_args kmem_default_args = {};
+
+ return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ unsigned int flags, void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+#define kmem_cache_create(__name, __object_size, __args, ...) \
+ _Generic((__args), \
+ struct kmem_cache_args *: __kmem_cache_create_args, \
+ void *: __kmem_cache_default_args, \
+ default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 0f97fb0d19e19c327aa4843a35b45cc086f4f366..97b8412ccbb6d222604c7b397c53c65618d8d51b 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -16,21 +16,6 @@ int nr_allocated;
int preempt_count;
int test_verbose;
-struct kmem_cache {
- pthread_mutex_t lock;
- unsigned int size;
- unsigned int align;
- int nr_objs;
- void *objs;
- void (*ctor)(void *);
- unsigned int non_kernel;
- unsigned long nr_allocated;
- unsigned long nr_tallocated;
- bool exec_callback;
- void (*callback)(void *);
- void *private;
-};
-
void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
{
cachep->callback = callback;
@@ -234,23 +219,26 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
}
struct kmem_cache *
-kmem_cache_create(const char *name, unsigned int size, unsigned int align,
- unsigned int flags, void (*ctor)(void *))
+__kmem_cache_create_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args,
+ unsigned int flags)
{
struct kmem_cache *ret = malloc(sizeof(*ret));
pthread_mutex_init(&ret->lock, NULL);
ret->size = size;
- ret->align = align;
+ ret->align = args->align;
+ ret->sheaf_capacity = args->sheaf_capacity;
ret->nr_objs = 0;
ret->nr_allocated = 0;
ret->nr_tallocated = 0;
ret->objs = NULL;
- ret->ctor = ctor;
+ ret->ctor = args->ctor;
ret->non_kernel = 0;
ret->exec_callback = false;
ret->callback = NULL;
ret->private = NULL;
+
return ret;
}
diff --git a/tools/testing/shared/maple-shim.c b/tools/testing/shared/maple-shim.c
index 640df76f483e09f3b6f85612786060dd273e2362..9d7b743415660305416e972fa75b56824211b0eb 100644
--- a/tools/testing/shared/maple-shim.c
+++ b/tools/testing/shared/maple-shim.c
@@ -3,5 +3,6 @@
/* Very simple shim around the maple tree. */
#include "maple-shared.h"
+#include <linux/slab.h>
#include "../../../lib/maple_tree.c"
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 3639aa8dd2b06ebe5b9cfcfe6669994fd38c482d..09732eff8dd84555563b3d485805ebab7b204584 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -26,6 +26,7 @@
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/slab.h>
extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
@@ -509,65 +510,6 @@ struct pagetable_move_control {
.len_in = len_, \
}
-struct kmem_cache_args {
- /**
- * @align: The required alignment for the objects.
- *
- * %0 means no specific alignment is requested.
- */
- unsigned int align;
- /**
- * @useroffset: Usercopy region offset.
- *
- * %0 is a valid offset, when @usersize is non-%0
- */
- unsigned int useroffset;
- /**
- * @usersize: Usercopy region size.
- *
- * %0 means no usercopy region is specified.
- */
- unsigned int usersize;
- /**
- * @freeptr_offset: Custom offset for the free pointer
- * in &SLAB_TYPESAFE_BY_RCU caches
- *
- * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
- * outside of the object. This might cause the object to grow in size.
- * Cache creators that have a reason to avoid this can specify a custom
- * free pointer offset in their struct where the free pointer will be
- * placed.
- *
- * Note that placing the free pointer inside the object requires the
- * caller to ensure that no fields are invalidated that are required to
- * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
- * details).
- *
- * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
- * is specified, %use_freeptr_offset must be set %true.
- *
- * Note that @ctor currently isn't supported with custom free pointers
- * as a @ctor requires an external free pointer.
- */
- unsigned int freeptr_offset;
- /**
- * @use_freeptr_offset: Whether a @freeptr_offset is used.
- */
- bool use_freeptr_offset;
- /**
- * @ctor: A constructor for the objects.
- *
- * The constructor is invoked for each object in a newly allocated slab
- * page. It is the cache user's responsibility to free object in the
- * same state as after calling the constructor, or deal appropriately
- * with any differences between a freshly constructed and a reallocated
- * object.
- *
- * %NULL means no constructor.
- */
- void (*ctor)(void *);
-};
-
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
mas_pause(&vmi->mas);
@@ -652,40 +594,6 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_lock_seq = UINT_MAX;
}
-struct kmem_cache {
- const char *name;
- size_t object_size;
- struct kmem_cache_args *args;
-};
-
-static inline struct kmem_cache *__kmem_cache_create(const char *name,
- size_t object_size,
- struct kmem_cache_args *args)
-{
- struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));
-
- ret->name = name;
- ret->object_size = object_size;
- ret->args = args;
-
- return ret;
-}
-
-#define kmem_cache_create(__name, __object_size, __args, ...) \
- __kmem_cache_create((__name), (__object_size), (__args))
-
-static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
-{
- (void)gfpflags;
-
- return calloc(s->object_size, 1);
-}
-
-static inline void kmem_cache_free(struct kmem_cache *s, void *x)
-{
- free(x);
-}
-
/*
* These are defined in vma.h, but sadly vm_stat_account() is referenced by
* kernel/fork.c, so we have to these broadly available there, and temporarily
--
2.51.0
* [PATCH 10/12] maple_tree: Sheaf conversion and testing
From: Vlastimil Babka @ 2025-09-01 11:09 UTC
To: Liam R. Howlett, Matthew Wilcox, Lorenzo Stoakes, Jann Horn,
Pedro Falcato, Suren Baghdasaryan
Cc: Harry Yoo, Andrew Morton, maple-tree, linux-mm, linux-fsdevel,
linux-kernel, Vlastimil Babka
From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Use sheaves instead of bulk allocations. This should speed up the
allocations and the return path of unused allocations.
Remove the push and pop of nodes from the maple state as this is now
handled by the slab layer with sheaves.
Tests have been removed where necessary, since the tree features they
exercised (such as maple state node push/pop) no longer exist.
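As an illustrative sketch only (not code from this patch), the resulting
allocation flow looks roughly like the function below. The helper name and
error handling are invented and simplified; the slab calls shown
(kmem_cache_prefill_sheaf(), kmem_cache_refill_sheaf(),
kmem_cache_alloc_from_sheaf(), kmem_cache_return_sheaf() and
kmem_cache_sheaf_size()) are the interface this patch builds on through
mas_alloc_nodes(), mas_pop_node() and mas_destroy():

/* Sketch: fill the sheaf, take one node, then hand the rest back. */
static struct maple_node *example_sheaf_cycle(struct ma_state *mas, gfp_t gfp)
{
	struct maple_node *node;

	/* Prefill (or top up) the sheaf with the calculated node count. */
	if (!mas->sheaf)
		mas->sheaf = kmem_cache_prefill_sheaf(maple_node_cache, gfp,
						      mas->node_request);
	else if (kmem_cache_sheaf_size(mas->sheaf) < mas->node_request &&
		 kmem_cache_refill_sheaf(maple_node_cache, gfp, &mas->sheaf,
					 mas->node_request))
		return NULL;

	if (!mas->sheaf)
		return NULL;
	mas->node_request = 0;

	/* Nodes are taken straight from the sheaf during the write. */
	node = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT,
					   mas->sheaf);

	/* Whatever is left is handed back in one call at destroy time. */
	kmem_cache_return_sheaf(maple_node_cache, GFP_KERNEL, mas->sheaf);
	mas->sheaf = NULL;

	return node;
}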
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
include/linux/maple_tree.h | 6 +-
lib/maple_tree.c | 329 ++++++---------------------
lib/test_maple_tree.c | 8 +
tools/testing/radix-tree/maple.c | 464 ++-------------------------------------
tools/testing/shared/linux.c | 5 +-
5 files changed, 99 insertions(+), 713 deletions(-)
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index bafe143b1f783202e27b32567fffee4149e8e266..166fd67e00d882b1e6de1f80c1b590bba7497cd3 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -442,7 +442,8 @@ struct ma_state {
struct maple_enode *node; /* The node containing this entry */
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
- struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
+ unsigned long node_request;
enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
@@ -490,7 +491,8 @@ struct ma_wr_state {
.status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
- .alloc = NULL, \
+	.node_request = 0,		\
+ .sheaf = NULL, \
.mas_flags = 0, \
.store_type = wr_invalid, \
}
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b361b484cfcaacd99472dd4c2b8de9260b307425..cfe80c50a97e3118eefc24275fbcd2eec5e6e6e8 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -186,6 +186,22 @@ static inline void mt_free_bulk(size_t size, void __rcu **nodes)
kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}
+static void mt_return_sheaf(struct slab_sheaf *sheaf)
+{
+ kmem_cache_return_sheaf(maple_node_cache, GFP_KERNEL, sheaf);
+}
+
+static struct slab_sheaf *mt_get_sheaf(gfp_t gfp, int count)
+{
+ return kmem_cache_prefill_sheaf(maple_node_cache, gfp, count);
+}
+
+static int mt_refill_sheaf(gfp_t gfp, struct slab_sheaf **sheaf,
+ unsigned int size)
+{
+ return kmem_cache_refill_sheaf(maple_node_cache, gfp, sheaf, size);
+}
+
/*
* ma_free_rcu() - Use rcu callback to free a maple node
* @node: The node to free
@@ -578,67 +594,6 @@ static __always_inline bool mte_dead_node(const struct maple_enode *enode)
return ma_dead_node(node);
}
-/*
- * mas_allocated() - Get the number of nodes allocated in a maple state.
- * @mas: The maple state
- *
- * The ma_state alloc member is overloaded to hold a pointer to the first
- * allocated node or to the number of requested nodes to allocate. If bit 0 is
- * set, then the alloc contains the number of requested nodes. If there is an
- * allocated node, then the total allocated nodes is in that node.
- *
- * Return: The total number of nodes allocated
- */
-static inline unsigned long mas_allocated(const struct ma_state *mas)
-{
- if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
- return 0;
-
- return mas->alloc->total;
-}
-
-/*
- * mas_set_alloc_req() - Set the requested number of allocations.
- * @mas: the maple state
- * @count: the number of allocations.
- *
- * The requested number of allocations is either in the first allocated node,
- * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
- * no allocated node. Set the request either in the node or do the necessary
- * encoding to store in @mas->alloc directly.
- */
-static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
-{
- if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
- if (!count)
- mas->alloc = NULL;
- else
- mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
- return;
- }
-
- mas->alloc->request_count = count;
-}
-
-/*
- * mas_alloc_req() - get the requested number of allocations.
- * @mas: The maple state
- *
- * The alloc count is either stored directly in @mas, or in
- * @mas->alloc->request_count if there is at least one node allocated. Decode
- * the request count if it's stored directly in @mas->alloc.
- *
- * Return: The allocation request count.
- */
-static inline unsigned int mas_alloc_req(const struct ma_state *mas)
-{
- if ((unsigned long)mas->alloc & 0x1)
- return (unsigned long)(mas->alloc) >> 1;
- else if (mas->alloc)
- return mas->alloc->request_count;
- return 0;
-}
-
/*
* ma_pivots() - Get a pointer to the maple node pivots.
* @node: the maple node
@@ -1142,77 +1097,15 @@ static int mas_ascend(struct ma_state *mas)
*/
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
- struct maple_alloc *ret, *node = mas->alloc;
- unsigned long total = mas_allocated(mas);
- unsigned int req = mas_alloc_req(mas);
+ struct maple_node *ret;
- /* nothing or a request pending. */
- if (WARN_ON(!total))
+ if (WARN_ON_ONCE(!mas->sheaf))
return NULL;
- if (total == 1) {
- /* single allocation in this ma_state */
- mas->alloc = NULL;
- ret = node;
- goto single_node;
- }
-
- if (node->node_count == 1) {
- /* Single allocation in this node. */
- mas->alloc = node->slot[0];
- mas->alloc->total = node->total - 1;
- ret = node;
- goto new_head;
- }
- node->total--;
- ret = node->slot[--node->node_count];
- node->slot[node->node_count] = NULL;
-
-single_node:
-new_head:
- if (req) {
- req++;
- mas_set_alloc_req(mas, req);
- }
-
+ ret = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT, mas->sheaf);
memset(ret, 0, sizeof(*ret));
- return (struct maple_node *)ret;
-}
-
-/*
- * mas_push_node() - Push a node back on the maple state allocation.
- * @mas: The maple state
- * @used: The used maple node
- *
- * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
- * requested node count as necessary.
- */
-static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
-{
- struct maple_alloc *reuse = (struct maple_alloc *)used;
- struct maple_alloc *head = mas->alloc;
- unsigned long count;
- unsigned int requested = mas_alloc_req(mas);
- count = mas_allocated(mas);
-
- reuse->request_count = 0;
- reuse->node_count = 0;
- if (count) {
- if (head->node_count < MAPLE_ALLOC_SLOTS) {
- head->slot[head->node_count++] = reuse;
- head->total++;
- goto done;
- }
- reuse->slot[0] = head;
- reuse->node_count = 1;
- }
-
- reuse->total = count + 1;
- mas->alloc = reuse;
-done:
- if (requested > 1)
- mas_set_alloc_req(mas, requested - 1);
+ return ret;
}
/*
@@ -1222,75 +1115,32 @@ static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
*/
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
- struct maple_alloc *node;
- unsigned long allocated = mas_allocated(mas);
- unsigned int requested = mas_alloc_req(mas);
- unsigned int count;
- void **slots = NULL;
- unsigned int max_req = 0;
-
- if (!requested)
- return;
+ if (unlikely(mas->sheaf)) {
+ unsigned long refill = mas->node_request;
- mas_set_alloc_req(mas, 0);
- if (mas->mas_flags & MA_STATE_PREALLOC) {
- if (allocated)
+		if (kmem_cache_sheaf_size(mas->sheaf) >= refill) {
+ mas->node_request = 0;
return;
- WARN_ON(!allocated);
- }
-
- if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
- node = (struct maple_alloc *)mt_alloc_one(gfp);
- if (!node)
- goto nomem_one;
-
- if (allocated) {
- node->slot[0] = mas->alloc;
- node->node_count = 1;
- } else {
- node->node_count = 0;
}
- mas->alloc = node;
- node->total = ++allocated;
- node->request_count = 0;
- requested--;
- }
+ if (mt_refill_sheaf(gfp, &mas->sheaf, refill))
+ goto error;
- node = mas->alloc;
- while (requested) {
- max_req = MAPLE_ALLOC_SLOTS - node->node_count;
- slots = (void **)&node->slot[node->node_count];
- max_req = min(requested, max_req);
- count = mt_alloc_bulk(gfp, max_req, slots);
- if (!count)
- goto nomem_bulk;
-
- if (node->node_count == 0) {
- node->slot[0]->node_count = 0;
- node->slot[0]->request_count = 0;
- }
+ mas->node_request = 0;
+ return;
+ }
- node->node_count += count;
- allocated += count;
- /* find a non-full node*/
- do {
- node = node->slot[0];
- } while (unlikely(node->node_count == MAPLE_ALLOC_SLOTS));
- requested -= count;
+ mas->sheaf = mt_get_sheaf(gfp, mas->node_request);
+ if (likely(mas->sheaf)) {
+ mas->node_request = 0;
+ return;
}
- mas->alloc->total = allocated;
- return;
-nomem_bulk:
- /* Clean up potential freed allocations on bulk failure */
- memset(slots, 0, max_req * sizeof(unsigned long));
- mas->alloc->total = allocated;
-nomem_one:
- mas_set_alloc_req(mas, requested);
+error:
mas_set_err(mas, -ENOMEM);
}
+
/*
* mas_free() - Free an encoded maple node
* @mas: The maple state
@@ -1301,42 +1151,7 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
*/
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
- struct maple_node *tmp = mte_to_node(used);
-
- if (mt_in_rcu(mas->tree))
- ma_free_rcu(tmp);
- else
- mas_push_node(mas, tmp);
-}
-
-/*
- * mas_node_count_gfp() - Check if enough nodes are allocated and request more
- * if there is not enough nodes.
- * @mas: The maple state
- * @count: The number of nodes needed
- * @gfp: the gfp flags
- */
-static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
-{
- unsigned long allocated = mas_allocated(mas);
-
- if (allocated < count) {
- mas_set_alloc_req(mas, count - allocated);
- mas_alloc_nodes(mas, gfp);
- }
-}
-
-/*
- * mas_node_count() - Check if enough nodes are allocated and request more if
- * there is not enough nodes.
- * @mas: The maple state
- * @count: The number of nodes needed
- *
- * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
- */
-static void mas_node_count(struct ma_state *mas, int count)
-{
- return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
+ ma_free_rcu(mte_to_node(used));
}
/*
@@ -2511,10 +2326,7 @@ static inline void mas_topiary_node(struct ma_state *mas,
enode = tmp_mas->node;
tmp = mte_to_node(enode);
mte_set_node_dead(enode);
- if (in_rcu)
- ma_free_rcu(tmp);
- else
- mas_push_node(mas, tmp);
+ ma_free_rcu(tmp);
}
/*
@@ -4162,7 +3974,7 @@ static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
*
* Return: Number of nodes required for preallocation.
*/
-static inline int mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry)
+static inline void mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry)
{
struct ma_state *mas = wr_mas->mas;
unsigned char height = mas_mt_height(mas);
@@ -4208,7 +4020,7 @@ static inline int mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry)
WARN_ON_ONCE(1);
}
- return ret;
+ mas->node_request = ret;
}
/*
@@ -4269,15 +4081,15 @@ static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
*/
static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
{
- int request;
+ struct ma_state *mas = wr_mas->mas;
mas_wr_prealloc_setup(wr_mas);
- wr_mas->mas->store_type = mas_wr_store_type(wr_mas);
- request = mas_prealloc_calc(wr_mas, entry);
- if (!request)
+ mas->store_type = mas_wr_store_type(wr_mas);
+ mas_prealloc_calc(wr_mas, entry);
+ if (!mas->node_request)
return;
- mas_node_count(wr_mas->mas, request);
+ mas_alloc_nodes(mas, GFP_NOWAIT | __GFP_NOWARN);
}
/**
@@ -5390,7 +5202,6 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
*/
void *mas_store(struct ma_state *mas, void *entry)
{
- int request;
MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(__func__, mas, 0, entry);
@@ -5420,11 +5231,11 @@ void *mas_store(struct ma_state *mas, void *entry)
return wr_mas.content;
}
- request = mas_prealloc_calc(&wr_mas, entry);
- if (!request)
+ mas_prealloc_calc(&wr_mas, entry);
+ if (!mas->node_request)
goto store;
- mas_node_count(mas, request);
+ mas_alloc_nodes(mas, GFP_NOWAIT | __GFP_NOWARN);
if (mas_is_err(mas))
return NULL;
@@ -5512,20 +5323,19 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
- int ret = 0;
- int request;
mas_wr_prealloc_setup(&wr_mas);
mas->store_type = mas_wr_store_type(&wr_mas);
- request = mas_prealloc_calc(&wr_mas, entry);
- if (!request)
+ mas_prealloc_calc(&wr_mas, entry);
+ if (!mas->node_request)
goto set_flag;
mas->mas_flags &= ~MA_STATE_PREALLOC;
- mas_node_count_gfp(mas, request, gfp);
+ mas_alloc_nodes(mas, gfp);
if (mas_is_err(mas)) {
- mas_set_alloc_req(mas, 0);
- ret = xa_err(mas->node);
+ int ret = xa_err(mas->node);
+
+ mas->node_request = 0;
mas_destroy(mas);
mas_reset(mas);
return ret;
@@ -5533,7 +5343,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
set_flag:
mas->mas_flags |= MA_STATE_PREALLOC;
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
@@ -5547,9 +5357,6 @@ EXPORT_SYMBOL_GPL(mas_preallocate);
*/
void mas_destroy(struct ma_state *mas)
{
- struct maple_alloc *node;
- unsigned long total;
-
/*
* When using mas_for_each() to insert an expected number of elements,
* it is possible that the number inserted is less than the expected
@@ -5570,21 +5377,11 @@ void mas_destroy(struct ma_state *mas)
}
mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
- total = mas_allocated(mas);
- while (total) {
- node = mas->alloc;
- mas->alloc = node->slot[0];
- if (node->node_count > 1) {
- size_t count = node->node_count - 1;
-
- mt_free_bulk(count, (void __rcu **)&node->slot[1]);
- total -= count;
- }
- kfree(ma_mnode_ptr(node));
- total--;
- }
+ mas->node_request = 0;
+ if (mas->sheaf)
+ mt_return_sheaf(mas->sheaf);
- mas->alloc = NULL;
+ mas->sheaf = NULL;
}
EXPORT_SYMBOL_GPL(mas_destroy);
@@ -5634,7 +5431,8 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
/* Internal nodes */
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
/* Add working room for split (2 nodes) + new parents */
- mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
+ mas->node_request = nr_nodes + 3;
+ mas_alloc_nodes(mas, GFP_KERNEL);
/* Detect if allocations run out */
mas->mas_flags |= MA_STATE_PREALLOC;
@@ -6281,7 +6079,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
mas_alloc_nodes(mas, gfp);
}
- if (!mas_allocated(mas))
+ if (!mas->sheaf)
return false;
mas->status = ma_start;
@@ -7676,8 +7474,9 @@ void mas_dump(const struct ma_state *mas)
pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
mas->index, mas->last);
- pr_err(" min=%lx max=%lx alloc=" PTR_FMT ", depth=%u, flags=%x\n",
- mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
+ pr_err(" min=%lx max=%lx sheaf=" PTR_FMT ", request %lu depth=%u, flags=%x\n",
+ mas->min, mas->max, mas->sheaf, mas->node_request, mas->depth,
+ mas->mas_flags);
if (mas->index > mas->last)
pr_err("Check index & last\n");
}
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index cb3936595b0d56a9682ff100eba54693a1427829..1848d127eb50650e7cc2b9dfbb15ed93aa889f01 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2746,6 +2746,7 @@ static noinline void __init check_fuzzer(struct maple_tree *mt)
mtree_test_erase(mt, ULONG_MAX - 10);
}
+#if 0
/* duplicate the tree with a specific gap */
static noinline void __init check_dup_gaps(struct maple_tree *mt,
unsigned long nr_entries, bool zero_start,
@@ -2770,6 +2771,7 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
mtree_store_range(mt, i*10, (i+1)*10 - gap,
xa_mk_value(i), GFP_KERNEL);
+ mt_dump(mt, mt_dump_dec);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_non_kernel(99999);
down_write(&newmt_lock);
@@ -2779,9 +2781,12 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
rcu_read_lock();
mas_for_each(&mas, tmp, ULONG_MAX) {
+ printk("%lu nodes %lu\n", mas.index,
+ kmem_cache_sheaf_count(newmas.sheaf));
newmas.index = mas.index;
newmas.last = mas.last;
mas_store(&newmas, tmp);
+ mt_dump(&newmt, mt_dump_dec);
}
rcu_read_unlock();
mas_destroy(&newmas);
@@ -2878,6 +2883,7 @@ static noinline void __init check_dup(struct maple_tree *mt)
cond_resched();
}
}
+#endif
static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
{
@@ -4077,9 +4083,11 @@ static int __init maple_tree_seed(void)
check_fuzzer(&tree);
mtree_destroy(&tree);
+#if 0
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_dup(&tree);
mtree_destroy(&tree);
+#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_bnode_min_spanning(&tree);
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 7fe91f24849b35723ec6aadbe45ec7d2abedcc11..da3e03d73b52162dab6fa5c368ad7b71b9e58521 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -57,430 +57,6 @@ struct rcu_reader_struct {
struct rcu_test_struct2 *test;
};
-static int get_alloc_node_count(struct ma_state *mas)
-{
- int count = 1;
- struct maple_alloc *node = mas->alloc;
-
- if (!node || ((unsigned long)node & 0x1))
- return 0;
- while (node->node_count) {
- count += node->node_count;
- node = node->slot[0];
- }
- return count;
-}
-
-static void check_mas_alloc_node_count(struct ma_state *mas)
-{
- mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 1, GFP_KERNEL);
- mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 3, GFP_KERNEL);
- MT_BUG_ON(mas->tree, get_alloc_node_count(mas) != mas->alloc->total);
- mas_destroy(mas);
-}
-
-/*
- * check_new_node() - Check the creation of new nodes and error path
- * verification.
- */
-static noinline void __init check_new_node(struct maple_tree *mt)
-{
-
- struct maple_node *mn, *mn2, *mn3;
- struct maple_alloc *smn;
- struct maple_node *nodes[100];
- int i, j, total;
-
- MA_STATE(mas, mt, 0, 0);
-
- check_mas_alloc_node_count(&mas);
-
- /* Try allocating 3 nodes */
- mtree_lock(mt);
- mt_set_non_kernel(0);
- /* request 3 nodes to be allocated. */
- mas_node_count(&mas, 3);
- /* Allocation request of 3. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 3);
- /* Allocate failed. */
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- mas_push_node(&mas, mn);
- mas_reset(&mas);
- mas_destroy(&mas);
- mtree_unlock(mt);
-
-
- /* Try allocating 1 node, then 2 more */
- mtree_lock(mt);
- /* Set allocation request to 1. */
- mas_set_alloc_req(&mas, 1);
- /* Check Allocation request of 1. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- mas_set_err(&mas, -ENOMEM);
- /* Validate allocation request. */
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- /* Eat the requested node. */
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] != NULL);
- MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas.status = ma_start;
- mas_destroy(&mas);
- /* Allocate 3 nodes, will fail. */
- mas_node_count(&mas, 3);
- /* Drop the lock and allocate 3 nodes. */
- mas_nomem(&mas, GFP_KERNEL);
- /* Ensure 3 are allocated. */
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- /* Allocation request of 0. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
-
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
- /* Ensure we counted 3. */
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- /* Free. */
- mas_reset(&mas);
- mas_destroy(&mas);
-
- /* Set allocation request to 1. */
- mas_set_alloc_req(&mas, 1);
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- mas_set_err(&mas, -ENOMEM);
- /* Validate allocation request. */
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != 1);
- /* Check the node is only one node. */
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] != NULL);
- MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != 1);
- MT_BUG_ON(mt, mas.alloc->node_count);
-
- mas_set_alloc_req(&mas, 2); /* request 2 more. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
- for (i = 2; i >= 0; i--) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != i);
- MT_BUG_ON(mt, !mn);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
-
- total = 64;
- mas_set_alloc_req(&mas, total); /* request 2 more. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (i = total; i > 0; i--) {
- unsigned int e = 0; /* expected node_count */
-
- if (!MAPLE_32BIT) {
- if (i >= 35)
- e = i - 34;
- else if (i >= 5)
- e = i - 4;
- else if (i >= 2)
- e = i - 1;
- } else {
- if (i >= 4)
- e = i - 3;
- else if (i >= 1)
- e = i - 1;
- else
- e = 0;
- }
-
- MT_BUG_ON(mt, mas.alloc->node_count != e);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
- MT_BUG_ON(mt, !mn);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
-
- total = 100;
- for (i = 1; i < total; i++) {
- mas_set_alloc_req(&mas, i);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (j = i; j > 0; j--) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
- MT_BUG_ON(mt, !mn);
- MT_BUG_ON(mt, not_empty(mn));
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != j);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- mas_set_alloc_req(&mas, i);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (j = 0; j <= i/2; j++) {
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- nodes[j] = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
- }
-
- while (j) {
- j--;
- mas_push_node(&mas, nodes[j]);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != i);
- for (j = 0; j <= i/2; j++) {
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
- }
- mas_reset(&mas);
- MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
- mas_destroy(&mas);
-
- }
-
- /* Set allocation request. */
- total = 500;
- mas_node_count(&mas, total);
- /* Drop the lock and allocate the nodes. */
- mas_nomem(&mas, GFP_KERNEL);
- MT_BUG_ON(mt, !mas.alloc);
- i = 1;
- smn = mas.alloc;
- while (i < total) {
- for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) {
- i++;
- MT_BUG_ON(mt, !smn->slot[j]);
- if (i == total)
- break;
- }
- smn = smn->slot[0]; /* next. */
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != total);
- mas_reset(&mas);
- mas_destroy(&mas); /* Free. */
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- for (i = 1; i < 128; i++) {
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
- for (j = i; j > 0; j--) { /*Free the requests */
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- }
-
- for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
- MA_STATE(mas2, mt, 0, 0);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
- for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mas_push_node(&mas2, mn);
- MT_BUG_ON(mt, mas_allocated(&mas2) != j);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- MT_BUG_ON(mt, mas_allocated(&mas2) != i);
-
- for (j = i; j > 0; j--) { /*Free the requests */
- MT_BUG_ON(mt, mas_allocated(&mas2) != j);
- mn = mas_pop_node(&mas2); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
- }
-
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
-
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
- /* Check the limit of pop/push/pop */
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_alloc_req(&mas));
- MT_BUG_ON(mt, mas.alloc->node_count != 1);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas.alloc->node_count != 1);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-
- for (i = 3; i < MAPLE_NODE_MASK * 3; i++) {
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put it back */
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn2 = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put them back */
- mas_push_node(&mas, mn2);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn2 = mas_pop_node(&mas); /* get the next node. */
- mn3 = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put them back */
- mas_push_node(&mas, mn2);
- mas_push_node(&mas, mn3);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas_destroy(&mas);
- }
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 5); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != 5);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 10); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != 10);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 2 + 2); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 2 + 2);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 2 + 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 2 + 1);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 3 + 2); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 3 + 2);
- mas_destroy(&mas);
-
- mtree_unlock(mt);
-}
-
/*
* Check erasing including RCU.
*/
@@ -35452,8 +35028,7 @@ static void check_dfs_preorder(struct maple_tree *mt)
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
mas_reset(&mas);
mt_zero_nr_tallocated();
- mt_set_non_kernel(200);
- mas_expected_entries(&mas, max);
+ mt_set_non_kernel(1000);
for (count = 0; count <= max; count++) {
mas.index = mas.last = count;
mas_store(&mas, xa_mk_value(count));
@@ -35518,6 +35093,13 @@ static unsigned char get_vacant_height(struct ma_wr_state *wr_mas, void *entry)
return vacant_height;
}
+static int mas_allocated(struct ma_state *mas)
+{
+ if (mas->sheaf)
+ return kmem_cache_sheaf_size(mas->sheaf);
+
+ return 0;
+}
/* Preallocation testing */
static noinline void __init check_prealloc(struct maple_tree *mt)
{
@@ -35536,7 +35118,10 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
/* Spanning store */
mas_set_range(&mas, 470, 500);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+
+ mas_wr_preallocate(&wr_mas, ptr);
+ MT_BUG_ON(mt, mas.store_type != wr_spanning_store);
+ MT_BUG_ON(mt, mas_is_err(&mas));
allocated = mas_allocated(&mas);
height = mas_mt_height(&mas);
vacant_height = get_vacant_height(&wr_mas, ptr);
@@ -35546,6 +35131,7 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
allocated = mas_allocated(&mas);
MT_BUG_ON(mt, allocated != 0);
+ mas_wr_preallocate(&wr_mas, ptr);
MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
allocated = mas_allocated(&mas);
height = mas_mt_height(&mas);
@@ -35586,20 +35172,6 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- vacant_height = get_vacant_height(&wr_mas, ptr);
- MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- mas_destroy(&mas);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
-
MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
allocated = mas_allocated(&mas);
height = mas_mt_height(&mas);
@@ -36400,11 +35972,17 @@ static void check_nomem_writer_race(struct maple_tree *mt)
check_load(mt, 6, xa_mk_value(0xC));
mtree_unlock(mt);
+ mt_set_non_kernel(0);
/* test for the same race but with mas_store_gfp() */
mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
mas_set_range(&mas, 0, 5);
+
+ /* setup writer 2 that will trigger the race condition */
+ mt_set_private(mt);
+ mt_set_callback(writer2);
+
mtree_lock(mt);
mas_store_gfp(&mas, NULL, GFP_KERNEL);
@@ -36546,10 +36124,6 @@ void farmer_tests(void)
check_erase_testset(&tree);
mtree_destroy(&tree);
- mt_init_flags(&tree, 0);
- check_new_node(&tree);
- mtree_destroy(&tree);
-
if (!MAPLE_32BIT) {
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_rcu_simulated(&tree);
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 4ceff7969b78cf8e33cd1e021c68bc9f8a02a7a1..8c72571559583759456c2b469a2abc2611117c13 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -64,7 +64,8 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
if (!(gfp & __GFP_DIRECT_RECLAIM)) {
if (!cachep->non_kernel) {
- cachep->exec_callback = true;
+ if (cachep->callback)
+ cachep->exec_callback = true;
return NULL;
}
@@ -210,6 +211,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
for (i = 0; i < size; i++)
__kmem_cache_free_locked(cachep, p[i]);
pthread_mutex_unlock(&cachep->lock);
+ if (cachep->callback)
+ cachep->exec_callback = true;
return 0;
}
--
2.51.0
^ permalink raw reply related [flat|nested] 15+ messages in thread