linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v4 0/4] support large align and nid in Rust allocators
@ 2025-06-26  8:35 Vitaly Wool
  2025-06-26  8:36 ` [PATCH v4 1/4] mm/vmalloc: allow to set node and align in vrealloc Vitaly Wool
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Vitaly Wool @ 2025-06-26  8:35 UTC (permalink / raw)
  To: linux-mm
  Cc: akpm, linux-kernel, Uladzislau Rezki, Danilo Krummrich,
	Alice Ryhl, rust-for-linux, Vitaly Wool

The coming patches provide the ability for Rust allocators to set
NUMA node and large alignment.

Changelog:
v2 -> v3:
* fixed the build breakage for non-MMU configs
v3 -> v4:
* added NUMA node support for k[v]realloc (patch #2)
* removed extra logic in Rust helpers
* patch for Rust allocators split into 2 (align: patch #3 and
  NUMA ids: patch #4)

Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v4 1/4] mm/vmalloc: allow to set node and align in vrealloc
  2025-06-26  8:35 [PATCH v4 0/4] support large align and nid in Rust allocators Vitaly Wool
@ 2025-06-26  8:36 ` Vitaly Wool
  2025-06-26  8:36 ` [PATCH v4 2/4] mm/slub: allow to set node and align in k[v]realloc Vitaly Wool
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: Vitaly Wool @ 2025-06-26  8:36 UTC (permalink / raw)
  To: linux-mm
  Cc: akpm, linux-kernel, Uladzislau Rezki, Danilo Krummrich,
	Alice Ryhl, rust-for-linux, Vitaly Wool

Reimplement vrealloc() to be able to set node and alignment should
a user need to do so. Rename the function to vrealloc_node() to
better match what it actually does now and introduce a macro for
vrealloc() for backward compatibility.

With that change we also provide the ability for the Rust part of
the kernel to set node and alignment in its allocations.

Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>
Reviewed-by: "Uladzislau Rezki (Sony)" <urezki@gmail.com>

---
 include/linux/vmalloc.h |  8 +++++---
 mm/nommu.c              |  3 ++-
 mm/vmalloc.c            | 16 +++++++++++++---
 3 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index fdc9aeb74a44..7d5251287687 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -197,9 +197,11 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
 extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
 #define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))
 
-void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
-		__realloc_size(2);
-#define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+void *__must_check vrealloc_node_noprof(const void *p, size_t size,
+		unsigned long align, gfp_t flags, int nid) __realloc_size(2);
+#define vrealloc_noprof(p, s, f)	vrealloc_node_noprof(p, s, 1, f, NUMA_NO_NODE)
+#define vrealloc_node(...)		alloc_hooks(vrealloc_node_noprof(__VA_ARGS__))
+#define vrealloc(...)			alloc_hooks(vrealloc_noprof(__VA_ARGS__))
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
diff --git a/mm/nommu.c b/mm/nommu.c
index 87e1acab0d64..6cec05c3312c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -119,7 +119,8 @@ void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(__vmalloc_noprof);
 
-void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *vrealloc_node_noprof(const void *p, size_t size, unsigned long align,
+			   gfp_t flags, int nid)
 {
 	return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6dbcdceecae1..412bf5b8d3ff 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4089,10 +4089,12 @@ void *vzalloc_node_noprof(unsigned long size, int node)
 EXPORT_SYMBOL(vzalloc_node_noprof);
 
 /**
- * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
+ * vrealloc_node - reallocate virtually contiguous memory; contents remain unchanged
  * @p: object to reallocate memory for
  * @size: the size to reallocate
+ * @align: requested alignment
  * @flags: the flags for the page level allocator
+ * @nid: node id
  *
  * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
  * @p is not a %NULL pointer, the object pointed to is freed.
@@ -4111,7 +4113,7 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
  * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
  *         failure
  */
-void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *vrealloc_node_noprof(const void *p, size_t size, unsigned long align, gfp_t flags, int nid)
 {
 	struct vm_struct *vm = NULL;
 	size_t alloced_size = 0;
@@ -4135,6 +4137,13 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 		if (WARN(alloced_size < old_size,
 			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
 			return NULL;
+		if (WARN(nid != NUMA_NO_NODE && nid != page_to_nid(vmalloc_to_page(p)),
+			 "vrealloc() has mismatched nids\n"))
+			return NULL;
+		if (WARN((uintptr_t)p & (align - 1),
+			 "will not reallocate with a bigger alignment (0x%lx)\n",
+			 align))
+			return NULL;
 	}
 
 	/*
@@ -4166,7 +4175,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	}
 
 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
-	n = __vmalloc_noprof(size, flags);
+	n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0));
+
 	if (!n)
 		return NULL;
 
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v4 2/4] mm/slub: allow to set node and align in k[v]realloc
  2025-06-26  8:35 [PATCH v4 0/4] support large align and nid in Rust allocators Vitaly Wool
  2025-06-26  8:36 ` [PATCH v4 1/4] mm/vmalloc: allow to set node and align in vrealloc Vitaly Wool
@ 2025-06-26  8:36 ` Vitaly Wool
  2025-06-26 20:53   ` Tamir Duberstein
  2025-06-26  8:36 ` [PATCH v4 3/4] rust: support large alignments in allocations Vitaly Wool
  2025-06-26  8:36 ` [PATCH v4 4/4] rust: support NUMA ids " Vitaly Wool
  3 siblings, 1 reply; 11+ messages in thread
From: Vitaly Wool @ 2025-06-26  8:36 UTC (permalink / raw)
  To: linux-mm
  Cc: akpm, linux-kernel, Uladzislau Rezki, Danilo Krummrich,
	Alice Ryhl, rust-for-linux, Vitaly Wool

Reimplement k[v]realloc() to be able to set node and alignment
should a user need to do so. Rename the respective functions to
k[v]realloc_node() to better match what they actually do now and
introduce macros for k[v]realloc() for backward compatibility.

With that change we also provide the ability for the Rust part of
the kernel to set node and alignment in its K[v]xxx [re]allocations.

Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>
---
 include/linux/slab.h | 12 ++++++++----
 mm/slub.c            | 33 ++++++++++++++++++++++-----------
 2 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index d5a8ab98035c..119f100978c8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -465,9 +465,11 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc_noprof(const void *objp, size_t new_size,
-				    gfp_t flags) __realloc_size(2);
-#define krealloc(...)				alloc_hooks(krealloc_noprof(__VA_ARGS__))
+void * __must_check krealloc_node_noprof(const void *objp, size_t new_size,
+					 gfp_t flags, int nid) __realloc_size(2);
+#define krealloc_node(...)		alloc_hooks(krealloc_node_noprof(__VA_ARGS__))
+#define krealloc_noprof(o, s, f)	krealloc_node_noprof(o, s, f, NUMA_NO_NODE)
+#define krealloc(...)			alloc_hooks(krealloc_noprof(__VA_ARGS__))
 
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
@@ -1073,8 +1075,10 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
 #define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
 #define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
 
-void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
 		__realloc_size(2);
+#define kvrealloc_node(...)			alloc_hooks(kvrealloc_node_noprof(__VA_ARGS__))
+#define kvrealloc_noprof(p, s, f)		kvrealloc_node_noprof(p, s, f, NUMA_NO_NODE)
 #define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
 
 extern void kvfree(const void *addr);
diff --git a/mm/slub.c b/mm/slub.c
index c4b64821e680..2d5150d075d5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4845,7 +4845,7 @@ void kfree(const void *object)
 EXPORT_SYMBOL(kfree);
 
 static __always_inline __realloc_size(2) void *
-__do_krealloc(const void *p, size_t new_size, gfp_t flags)
+__do_krealloc(const void *p, size_t new_size, gfp_t flags, int nid)
 {
 	void *ret;
 	size_t ks = 0;
@@ -4859,6 +4859,15 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 	if (!kasan_check_byte(p))
 		return NULL;
 
+	/*
+	 * it is possible to support reallocation with a different nid, but
+	 * it doesn't go well with the concept of krealloc(). Such
+	 * reallocation should be done explicitly instead.
+	 */
+	if (WARN(nid != NUMA_NO_NODE && nid != page_to_nid(virt_to_page(p)),
+				"krealloc() has mismatched nids\n"))
+		return NULL;
+
 	if (is_kfence_address(p)) {
 		ks = orig_size = kfence_ksize(p);
 	} else {
@@ -4903,7 +4912,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 	return (void *)p;
 
 alloc_new:
-	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
+	ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
 	if (ret && p) {
 		/* Disable KASAN checks as the object's redzone is accessed. */
 		kasan_disable_current();
@@ -4919,6 +4928,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
  * @p: object to reallocate memory for.
  * @new_size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
+ * @nid: NUMA node or NUMA_NO_NODE
  *
  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
@@ -4947,7 +4957,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
  *
  * Return: pointer to the allocated memory or %NULL in case of error
  */
-void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
+void *krealloc_node_noprof(const void *p, size_t new_size, gfp_t flags, int nid)
 {
 	void *ret;
 
@@ -4956,13 +4966,13 @@ void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
 		return ZERO_SIZE_PTR;
 	}
 
-	ret = __do_krealloc(p, new_size, flags);
+	ret = __do_krealloc(p, new_size, flags, nid);
 	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
 		kfree(p);
 
 	return ret;
 }
-EXPORT_SYMBOL(krealloc_noprof);
+EXPORT_SYMBOL(krealloc_node_noprof);
 
 static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
 {
@@ -5079,10 +5089,11 @@ void kvfree_sensitive(const void *addr, size_t len)
 EXPORT_SYMBOL(kvfree_sensitive);
 
 /**
- * kvrealloc - reallocate memory; contents remain unchanged
+ * kvrealloc_node - reallocate memory; contents remain unchanged
  * @p: object to reallocate memory for
  * @size: the size to reallocate
  * @flags: the flags for the page level allocator
+ * @nid: NUMA node id
  *
  * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
  * and @p is not a %NULL pointer, the object pointed to is freed.
@@ -5100,17 +5111,17 @@ EXPORT_SYMBOL(kvfree_sensitive);
  *
  * Return: pointer to the allocated memory or %NULL in case of error
  */
-void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
 {
 	void *n;
 
 	if (is_vmalloc_addr(p))
-		return vrealloc_noprof(p, size, flags);
+		return vrealloc_node_noprof(p, size, 1, flags, nid);
 
-	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
+	n = krealloc_node_noprof(p, size, kmalloc_gfp_adjust(flags, size), nid);
 	if (!n) {
 		/* We failed to krealloc(), fall back to kvmalloc(). */
-		n = kvmalloc_noprof(size, flags);
+		n = kvmalloc_node_noprof(size, flags, nid);
 		if (!n)
 			return NULL;
 
@@ -5126,7 +5137,7 @@ void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
 
 	return n;
 }
-EXPORT_SYMBOL(kvrealloc_noprof);
+EXPORT_SYMBOL(kvrealloc_node_noprof);
 
 struct detached_freelist {
 	struct slab *slab;
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v4 3/4] rust: support large alignments in allocations
  2025-06-26  8:35 [PATCH v4 0/4] support large align and nid in Rust allocators Vitaly Wool
  2025-06-26  8:36 ` [PATCH v4 1/4] mm/vmalloc: allow to set node and align in vrealloc Vitaly Wool
  2025-06-26  8:36 ` [PATCH v4 2/4] mm/slub: allow to set node and align in k[v]realloc Vitaly Wool
@ 2025-06-26  8:36 ` Vitaly Wool
  2025-06-26 12:36   ` Danilo Krummrich
  2025-06-26  8:36 ` [PATCH v4 4/4] rust: support NUMA ids " Vitaly Wool
  3 siblings, 1 reply; 11+ messages in thread
From: Vitaly Wool @ 2025-06-26  8:36 UTC (permalink / raw)
  To: linux-mm
  Cc: akpm, linux-kernel, Uladzislau Rezki, Danilo Krummrich,
	Alice Ryhl, rust-for-linux, Vitaly Wool

Add support for large (> PAGE_SIZE) alignments in Rust allocators
(Kmalloc support for large alignments is limited to the requested
size, which is a reasonable limitation anyway).

Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>
---
 rust/helpers/slab.c            |  4 ++--
 rust/helpers/vmalloc.c         |  4 ++--
 rust/kernel/alloc/allocator.rs | 16 ++--------------
 3 files changed, 6 insertions(+), 18 deletions(-)

diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
index a842bfbddcba..5e9e8dd2bba0 100644
--- a/rust/helpers/slab.c
+++ b/rust/helpers/slab.c
@@ -3,13 +3,13 @@
 #include <linux/slab.h>
 
 void * __must_check __realloc_size(2)
-rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
+rust_helper_krealloc(const void *objp, size_t new_size, unsigned long align, gfp_t flags)
 {
 	return krealloc(objp, new_size, flags);
 }
 
 void * __must_check __realloc_size(2)
-rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
+rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
 {
 	return kvrealloc(p, size, flags);
 }
diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
index 80d34501bbc0..4618c0b79283 100644
--- a/rust/helpers/vmalloc.c
+++ b/rust/helpers/vmalloc.c
@@ -3,7 +3,7 @@
 #include <linux/vmalloc.h>
 
 void * __must_check __realloc_size(2)
-rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
+rust_helper_vrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
 {
-	return vrealloc(p, size, flags);
+	return vrealloc_node(p, size, align, flags, NUMA_NO_NODE);
 }
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index aa2dfa9dca4c..a0d78c497974 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -58,7 +58,7 @@ fn aligned_size(new_layout: Layout) -> usize {
 ///
 /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
 struct ReallocFunc(
-    unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
+    unsafe extern "C" fn(*const crate::ffi::c_void, usize, usize, u32) -> *mut crate::ffi::c_void,
 );
 
 impl ReallocFunc {
@@ -110,7 +110,7 @@ unsafe fn call(
         // - Those functions provide the guarantees of this function.
         let raw_ptr = unsafe {
             // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
-            self.0(ptr.cast(), size, flags.0).cast()
+            self.0(ptr.cast(), size, layout.align(), flags.0).cast()
         };
 
         let ptr = if size == 0 {
@@ -152,12 +152,6 @@ unsafe fn realloc(
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
-        // TODO: Support alignments larger than PAGE_SIZE.
-        if layout.align() > bindings::PAGE_SIZE {
-            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
-            return Err(AllocError);
-        }
-
         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
         // allocated with this `Allocator`.
         unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
@@ -176,12 +170,6 @@ unsafe fn realloc(
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
-        // TODO: Support alignments larger than PAGE_SIZE.
-        if layout.align() > bindings::PAGE_SIZE {
-            pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
-            return Err(AllocError);
-        }
-
         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
         // allocated with this `Allocator`.
         unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v4 4/4] rust: support NUMA ids in allocations
  2025-06-26  8:35 [PATCH v4 0/4] support large align and nid in Rust allocators Vitaly Wool
                   ` (2 preceding siblings ...)
  2025-06-26  8:36 ` [PATCH v4 3/4] rust: support large alignments in allocations Vitaly Wool
@ 2025-06-26  8:36 ` Vitaly Wool
  2025-06-26 12:40   ` Danilo Krummrich
  3 siblings, 1 reply; 11+ messages in thread
From: Vitaly Wool @ 2025-06-26  8:36 UTC (permalink / raw)
  To: linux-mm
  Cc: akpm, linux-kernel, Uladzislau Rezki, Danilo Krummrich,
	Alice Ryhl, rust-for-linux, Vitaly Wool

Add support for specifying NUMA ids in Rust allocators as an Option
(i.e. providing `None` as nid corresponds to NUMA_NO_NODE). This
will allow specifying the node to use for allocation of e.g. {KV}Box.

Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>
---
 rust/helpers/slab.c            |  9 +++++----
 rust/helpers/vmalloc.c         |  4 ++--
 rust/kernel/alloc.rs           | 28 ++++++++++++++++++++++++++--
 rust/kernel/alloc/allocator.rs | 26 ++++++++++++++++++--------
 rust/kernel/alloc/kvec.rs      |  3 ++-
 5 files changed, 53 insertions(+), 17 deletions(-)

diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
index 5e9e8dd2bba0..ab1cf72f8353 100644
--- a/rust/helpers/slab.c
+++ b/rust/helpers/slab.c
@@ -3,13 +3,14 @@
 #include <linux/slab.h>
 
 void * __must_check __realloc_size(2)
-rust_helper_krealloc(const void *objp, size_t new_size, unsigned long align, gfp_t flags)
+rust_helper_krealloc_node(const void *objp, size_t new_size, unsigned long align, gfp_t flags,
+			  int nid)
 {
-	return krealloc(objp, new_size, flags);
+	return krealloc_node(objp, new_size, flags, nid);
 }
 
 void * __must_check __realloc_size(2)
-rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
+rust_helper_kvrealloc_node(const void *p, size_t size, unsigned long align, gfp_t flags, int nid)
 {
-	return kvrealloc(p, size, flags);
+	return kvrealloc_node(p, size, flags, nid);
 }
diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
index 4618c0b79283..9131279222fa 100644
--- a/rust/helpers/vmalloc.c
+++ b/rust/helpers/vmalloc.c
@@ -3,7 +3,7 @@
 #include <linux/vmalloc.h>
 
 void * __must_check __realloc_size(2)
-rust_helper_vrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
+rust_helper_vrealloc_node(const void *p, size_t size, unsigned long align, gfp_t flags, int node)
 {
-	return vrealloc_node(p, size, align, flags, NUMA_NO_NODE);
+	return vrealloc_node(p, size, align, flags, node);
 }
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
index a2c49e5494d3..1e26c2a7f47c 100644
--- a/rust/kernel/alloc.rs
+++ b/rust/kernel/alloc.rs
@@ -156,7 +156,30 @@ pub unsafe trait Allocator {
     fn alloc(layout: Layout, flags: Flags) -> Result<NonNull<[u8]>, AllocError> {
         // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
         // new memory allocation.
-        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags) }
+        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, None) }
+    }
+
+    /// Allocate memory based on `layout`, `flags` and `nid`.
+    ///
+    /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
+    /// constraints (i.e. minimum size and alignment as specified by `layout`).
+    ///
+    /// This function is equivalent to `realloc` when called with `None`.
+    ///
+    /// # Guarantees
+    ///
+    /// When the return value is `Ok(ptr)`, then `ptr` is
+    /// - valid for reads and writes for `layout.size()` bytes, until it is passed to
+    ///   [`Allocator::free`] or [`Allocator::realloc`],
+    /// - aligned to `layout.align()`,
+    ///
+    /// Additionally, `Flags` are honored as documented in
+    /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
+    fn alloc_node(layout: Layout, flags: Flags, nid: Option<i32>)
+                -> Result<NonNull<[u8]>, AllocError> {
+        // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
+        // new memory allocation.
+        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, nid) }
     }
 
     /// Re-allocate an existing memory allocation to satisfy the requested `layout`.
@@ -196,6 +219,7 @@ unsafe fn realloc(
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: Option<i32>,
     ) -> Result<NonNull<[u8]>, AllocError>;
 
     /// Free an existing memory allocation.
@@ -211,7 +235,7 @@ unsafe fn free(ptr: NonNull<u8>, layout: Layout) {
         // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this
         // allocator. We are passing a `Layout` with the smallest possible alignment, so it is
         // smaller than or equal to the alignment previously used with this allocation.
-        let _ = unsafe { Self::realloc(Some(ptr), Layout::new::<()>(), layout, Flags(0)) };
+        let _ = unsafe { Self::realloc(Some(ptr), Layout::new::<()>(), layout, Flags(0), None) };
     }
 }
 
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index a0d78c497974..5a0d0a57dfe5 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -58,18 +58,19 @@ fn aligned_size(new_layout: Layout) -> usize {
 ///
 /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
 struct ReallocFunc(
-    unsafe extern "C" fn(*const crate::ffi::c_void, usize, usize, u32) -> *mut crate::ffi::c_void,
+    unsafe extern "C" fn(*const crate::ffi::c_void, usize, usize, u32, i32)
+                        -> *mut crate::ffi::c_void,
 );
 
 impl ReallocFunc {
     // INVARIANT: `krealloc` satisfies the type invariants.
-    const KREALLOC: Self = Self(bindings::krealloc);
+    const KREALLOC: Self = Self(bindings::krealloc_node);
 
     // INVARIANT: `vrealloc` satisfies the type invariants.
-    const VREALLOC: Self = Self(bindings::vrealloc);
+    const VREALLOC: Self = Self(bindings::vrealloc_node);
 
     // INVARIANT: `kvrealloc` satisfies the type invariants.
-    const KVREALLOC: Self = Self(bindings::kvrealloc);
+    const KVREALLOC: Self = Self(bindings::kvrealloc_node);
 
     /// # Safety
     ///
@@ -87,6 +88,7 @@ unsafe fn call(
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: Option<i32>,
     ) -> Result<NonNull<[u8]>, AllocError> {
         let size = aligned_size(layout);
         let ptr = match ptr {
@@ -100,6 +102,11 @@ unsafe fn call(
             None => ptr::null(),
         };
 
+        let c_nid = match nid {
+            None => bindings::NUMA_NO_NODE,
+            Some(n) => n,
+        };
+
         // SAFETY:
         // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc` and thus only requires that
         //   `ptr` is NULL or valid.
@@ -110,7 +117,7 @@ unsafe fn call(
         // - Those functions provide the guarantees of this function.
         let raw_ptr = unsafe {
             // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
-            self.0(ptr.cast(), size, layout.align(), flags.0).cast()
+            self.0(ptr.cast(), size, layout.align(), flags.0, c_nid).cast()
         };
 
         let ptr = if size == 0 {
@@ -134,9 +141,10 @@ unsafe fn realloc(
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: Option<i32>,
     ) -> Result<NonNull<[u8]>, AllocError> {
         // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
-        unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+        unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags, nid) }
     }
 }
 
@@ -151,10 +159,11 @@ unsafe fn realloc(
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: Option<i32>,
     ) -> Result<NonNull<[u8]>, AllocError> {
         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
         // allocated with this `Allocator`.
-        unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
+        unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags, nid) }
     }
 }
 
@@ -169,9 +178,10 @@ unsafe fn realloc(
         layout: Layout,
         old_layout: Layout,
         flags: Flags,
+        nid: Option<i32>
     ) -> Result<NonNull<[u8]>, AllocError> {
         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
         // allocated with this `Allocator`.
-        unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
+        unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags, nid) }
     }
 }
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index 1a0dd852a468..ef4f977ba012 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -633,6 +633,7 @@ pub fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocEr
                 layout.into(),
                 self.layout.into(),
                 flags,
+                None,
             )?
         };
 
@@ -1058,7 +1059,7 @@ pub fn collect(self, flags: Flags) -> Vec<T, A> {
             // the type invariant to be smaller than `cap`. Depending on `realloc` this operation
             // may shrink the buffer or leave it as it is.
             ptr = match unsafe {
-                A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags)
+                A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags, None)
             } {
                 // If we fail to shrink, which likely can't even happen, continue with the existing
                 // buffer.
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 3/4] rust: support large alignments in allocations
  2025-06-26  8:36 ` [PATCH v4 3/4] rust: support large alignments in allocations Vitaly Wool
@ 2025-06-26 12:36   ` Danilo Krummrich
  2025-06-26 16:29     ` Vitaly Wool
  0 siblings, 1 reply; 11+ messages in thread
From: Danilo Krummrich @ 2025-06-26 12:36 UTC (permalink / raw)
  To: Vitaly Wool
  Cc: linux-mm, akpm, linux-kernel, Uladzislau Rezki, Alice Ryhl,
	rust-for-linux

On Thu, Jun 26, 2025 at 10:36:42AM +0200, Vitaly Wool wrote:
>  void * __must_check __realloc_size(2)
> -rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
> +rust_helper_krealloc(const void *objp, size_t new_size, unsigned long align, gfp_t flags)
>  {
>  	return krealloc(objp, new_size, flags);
>  }
>  
>  void * __must_check __realloc_size(2)
> -rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
> +rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
>  {
>  	return kvrealloc(p, size, flags);
>  }

I think you forgot to add comments explaining why we have the additional
discarded align argument.

Also please keep those helpers as they are. You can write an identical inline
function in Rust that discards the align argument and calls bindings::krealloc,
etc.

For instance:

	unsafe extern "C" fn krealloc_align(
	    ptr: *const c_void,
	    size: usize,
	    _align: c_ulong
	    flags: u32,
	) -> *mut c_void {
	    bindings::krealloc(ptr, size, flags)
	}

> diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
> index 80d34501bbc0..4618c0b79283 100644
> --- a/rust/helpers/vmalloc.c
> +++ b/rust/helpers/vmalloc.c
> @@ -3,7 +3,7 @@
>  #include <linux/vmalloc.h>
>  
>  void * __must_check __realloc_size(2)
> -rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
> +rust_helper_vrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
>  {
> -	return vrealloc(p, size, flags);
> +	return vrealloc_node(p, size, align, flags, NUMA_NO_NODE);
>  }

Same here, just make this a "real" helper for vrealloc_node() and create a Rust
function vrealloc_align() like in the example above.

> diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
> index aa2dfa9dca4c..a0d78c497974 100644
> --- a/rust/kernel/alloc/allocator.rs
> +++ b/rust/kernel/alloc/allocator.rs
> @@ -58,7 +58,7 @@ fn aligned_size(new_layout: Layout) -> usize {
>  ///
>  /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
>  struct ReallocFunc(
> -    unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
> +    unsafe extern "C" fn(*const crate::ffi::c_void, usize, usize, u32) -> *mut crate::ffi::c_void,

Should be c_ulong instead of usize.

>  );
>  
>  impl ReallocFunc {
> @@ -110,7 +110,7 @@ unsafe fn call(
>          // - Those functions provide the guarantees of this function.
>          let raw_ptr = unsafe {
>              // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
> -            self.0(ptr.cast(), size, flags.0).cast()
> +            self.0(ptr.cast(), size, layout.align(), flags.0).cast()
>          };
>  
>          let ptr = if size == 0 {
> @@ -152,12 +152,6 @@ unsafe fn realloc(
>          old_layout: Layout,
>          flags: Flags,
>      ) -> Result<NonNull<[u8]>, AllocError> {
> -        // TODO: Support alignments larger than PAGE_SIZE.
> -        if layout.align() > bindings::PAGE_SIZE {
> -            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
> -            return Err(AllocError);
> -        }
> -
>          // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
>          // allocated with this `Allocator`.
>          unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
> @@ -176,12 +170,6 @@ unsafe fn realloc(
>          old_layout: Layout,
>          flags: Flags,
>      ) -> Result<NonNull<[u8]>, AllocError> {
> -        // TODO: Support alignments larger than PAGE_SIZE.
> -        if layout.align() > bindings::PAGE_SIZE {
> -            pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
> -            return Err(AllocError);
> -        }

Didn't you propose to use VREALLOC if layout.align() > bindings::PAGE_SIZE?

>          // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
>          // allocated with this `Allocator`.
>          unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 4/4] rust: support NUMA ids in allocations
  2025-06-26  8:36 ` [PATCH v4 4/4] rust: support NUMA ids " Vitaly Wool
@ 2025-06-26 12:40   ` Danilo Krummrich
  0 siblings, 0 replies; 11+ messages in thread
From: Danilo Krummrich @ 2025-06-26 12:40 UTC (permalink / raw)
  To: Vitaly Wool
  Cc: linux-mm, akpm, linux-kernel, Uladzislau Rezki, Alice Ryhl,
	rust-for-linux

On Thu, Jun 26, 2025 at 10:36:53AM +0200, Vitaly Wool wrote:
> Add support for specifying NUMA ids in Rust allocators as an Option
> (i. e. providing `None` as nid corresponds to NUMA_NO_NODE). This
> will allow to specify node to use for allocation of e. g. {KV}Box.
> 
> Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>
> ---
>  rust/helpers/slab.c            |  9 +++++----
>  rust/helpers/vmalloc.c         |  4 ++--
>  rust/kernel/alloc.rs           | 28 ++++++++++++++++++++++++++--
>  rust/kernel/alloc/allocator.rs | 26 ++++++++++++++++++--------
>  rust/kernel/alloc/kvec.rs      |  3 ++-
>  5 files changed, 53 insertions(+), 17 deletions(-)
> 
> diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
> index 5e9e8dd2bba0..ab1cf72f8353 100644
> --- a/rust/helpers/slab.c
> +++ b/rust/helpers/slab.c
> @@ -3,13 +3,14 @@
>  #include <linux/slab.h>
>  
>  void * __must_check __realloc_size(2)
> -rust_helper_krealloc(const void *objp, size_t new_size, unsigned long align, gfp_t flags)
> +rust_helper_krealloc_node(const void *objp, size_t new_size, unsigned long align, gfp_t flags,
> +			  int nid)
>  {
> -	return krealloc(objp, new_size, flags);
> +	return krealloc_node(objp, new_size, flags, nid);
>  }
>  
>  void * __must_check __realloc_size(2)
> -rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
> +rust_helper_kvrealloc_node(const void *p, size_t size, unsigned long align, gfp_t flags, int nid)
>  {
> -	return kvrealloc(p, size, flags);
> +	return kvrealloc_node(p, size, flags, nid);
>  }

Same as in the previous patch, please keep those as "normal" helpers for
*realloc_node() and create the corresponding *realloc_node_align() helpers
discarding the argument on the Rust side.

> diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
> index a2c49e5494d3..1e26c2a7f47c 100644
> --- a/rust/kernel/alloc.rs
> +++ b/rust/kernel/alloc.rs
> @@ -156,7 +156,30 @@ pub unsafe trait Allocator {
>      fn alloc(layout: Layout, flags: Flags) -> Result<NonNull<[u8]>, AllocError> {
>          // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
>          // new memory allocation.
> -        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags) }
> +        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, None) }
> +    }
> +
> +    /// Allocate memory based on `layout`, `flags` and `nid`.
> +    ///
> +    /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
> +    /// constraints (i.e. minimum size and alignment as specified by `layout`).
> +    ///
> +    /// This function is equivalent to `realloc` when called with `None`.
> +    ///
> +    /// # Guarantees
> +    ///
> +    /// When the return value is `Ok(ptr)`, then `ptr` is
> +    /// - valid for reads and writes for `layout.size()` bytes, until it is passed to
> +    ///   [`Allocator::free`] or [`Allocator::realloc`],
> +    /// - aligned to `layout.align()`,
> +    ///
> +    /// Additionally, `Flags` are honored as documented in
> +    /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
> +    fn alloc_node(layout: Layout, flags: Flags, nid: Option<i32>)
> +                -> Result<NonNull<[u8]>, AllocError> {
> +        // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
> +        // new memory allocation.
> +        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, nid) }
>      }
>  
>      /// Re-allocate an existing memory allocation to satisfy the requested `layout`.
> @@ -196,6 +219,7 @@ unsafe fn realloc(
>          layout: Layout,
>          old_layout: Layout,
>          flags: Flags,
> +        nid: Option<i32>,
>      ) -> Result<NonNull<[u8]>, AllocError>;

I think you did forget to add realloc_node() as requested in the last iteration.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 3/4] rust: support large alignments in allocations
  2025-06-26 12:36   ` Danilo Krummrich
@ 2025-06-26 16:29     ` Vitaly Wool
  2025-06-26 17:58       ` Danilo Krummrich
  0 siblings, 1 reply; 11+ messages in thread
From: Vitaly Wool @ 2025-06-26 16:29 UTC (permalink / raw)
  To: Danilo Krummrich
  Cc: linux-mm, akpm, linux-kernel, Uladzislau Rezki, Alice Ryhl,
	rust-for-linux



> On Jun 26, 2025, at 2:36 PM, Danilo Krummrich <dakr@kernel.org> wrote:
> 
> On Thu, Jun 26, 2025 at 10:36:42AM +0200, Vitaly Wool wrote:
>> void * __must_check __realloc_size(2)
>> -rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
>> +rust_helper_krealloc(const void *objp, size_t new_size, unsigned long align, gfp_t flags)
>> {
>> return krealloc(objp, new_size, flags);
>> }
>> 
>> void * __must_check __realloc_size(2)
>> -rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
>> +rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
>> {
>> return kvrealloc(p, size, flags);
>> }
> 
> I think you forgot to add comments explaining why we have the additional
> discarded align argument.
> 
> Also please keep those helpers as they are. You can write an identical inline
> function in Rust that discards the align argument and calls bindings::krealloc,
> etc.
> 
> For instance:
> 
> unsafe extern "C" fn krealloc_align(
>    ptr: *const c_void,
>    size: usize,
> >    _align: c_ulong,
>    flags: u32,
> ) -> *mut c_void {
>    bindings::krealloc(ptr, size, flags)
> }
> 

Ugh. This is indeed a mistake from my side but I don’t quite agree with your variant here too.
The thing is that the new patchset has a patch #2 which adds kvrealloc_node and realloc_node so this chunk IMO should have looked like

-rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
+rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
 {
 -      return kvrealloc(p, size, flags);
 +      return kvrealloc_node(p, size, align, flags, NUMA_NO_NODE);

 }

…exactly like for vmalloc, see also my comment below.

>> diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
>> index 80d34501bbc0..4618c0b79283 100644
>> --- a/rust/helpers/vmalloc.c
>> +++ b/rust/helpers/vmalloc.c
>> @@ -3,7 +3,7 @@
>> #include <linux/vmalloc.h>
>> 
>> void * __must_check __realloc_size(2)
>> -rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
>> +rust_helper_vrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
>> {
>> - return vrealloc(p, size, flags);
>> + return vrealloc_node(p, size, align, flags, NUMA_NO_NODE);
>> }
> 
> Same here, just make this a "real" helper for vrealloc_node() and create a Rust
> function vrealloc_align() like in the example above.

Wait, why? What’s the use of vrealloc() if it doesn’t provide the align functionality that we need?
> 
>> diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
>> index aa2dfa9dca4c..a0d78c497974 100644
>> --- a/rust/kernel/alloc/allocator.rs
>> +++ b/rust/kernel/alloc/allocator.rs
>> @@ -58,7 +58,7 @@ fn aligned_size(new_layout: Layout) -> usize {
>> ///
>> /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
>> struct ReallocFunc(
>> -    unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
>> +    unsafe extern "C" fn(*const crate::ffi::c_void, usize, usize, u32) -> *mut crate::ffi::c_void,
> 
> Should be c_ulong instead of usize.
> 

Noted.

>> );
>> 
>> impl ReallocFunc {
>> @@ -110,7 +110,7 @@ unsafe fn call(
>>         // - Those functions provide the guarantees of this function.
>>         let raw_ptr = unsafe {
>>             // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
>> -            self.0(ptr.cast(), size, flags.0).cast()
>> +            self.0(ptr.cast(), size, layout.align(), flags.0).cast()
>>         };
>> 
>>         let ptr = if size == 0 {
>> @@ -152,12 +152,6 @@ unsafe fn realloc(
>>         old_layout: Layout,
>>         flags: Flags,
>>     ) -> Result<NonNull<[u8]>, AllocError> {
>> -        // TODO: Support alignments larger than PAGE_SIZE.
>> -        if layout.align() > bindings::PAGE_SIZE {
>> -            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
>> -            return Err(AllocError);
>> -        }
>> -
>>         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
>>         // allocated with this `Allocator`.
>>         unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
>> @@ -176,12 +170,6 @@ unsafe fn realloc(
>>         old_layout: Layout,
>>         flags: Flags,
>>     ) -> Result<NonNull<[u8]>, AllocError> {
>> -        // TODO: Support alignments larger than PAGE_SIZE.
>> -        if layout.align() > bindings::PAGE_SIZE {
>> -            pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
>> -            return Err(AllocError);
>> -        }
> 
> Didn't you propose to use VREALLOC if layout.align() > bindings::PAGE_SIZE?
> 

I did, and this is what happens on the C side now, please see the #2 patch in series.
I think it’s better this way because of uniformity but I don’t have a strong opinion on this.

>>         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
>>         // allocated with this `Allocator`.
>>         unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 3/4] rust: support large alignments in allocations
  2025-06-26 16:29     ` Vitaly Wool
@ 2025-06-26 17:58       ` Danilo Krummrich
  0 siblings, 0 replies; 11+ messages in thread
From: Danilo Krummrich @ 2025-06-26 17:58 UTC (permalink / raw)
  To: Vitaly Wool
  Cc: linux-mm, akpm, linux-kernel, Uladzislau Rezki, Alice Ryhl,
	rust-for-linux

On Thu, Jun 26, 2025 at 06:29:24PM +0200, Vitaly Wool wrote:
> 
> 
> > On Jun 26, 2025, at 2:36 PM, Danilo Krummrich <dakr@kernel.org> wrote:
> > 
> > On Thu, Jun 26, 2025 at 10:36:42AM +0200, Vitaly Wool wrote:
> >> void * __must_check __realloc_size(2)
> >> -rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
> >> +rust_helper_krealloc(const void *objp, size_t new_size, unsigned long align, gfp_t flags)
> >> {
> >> return krealloc(objp, new_size, flags);
> >> }
> >> 
> >> void * __must_check __realloc_size(2)
> >> -rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
> >> +rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
> >> {
> >> return kvrealloc(p, size, flags);
> >> }
> > 
> > I think you forgot to add comments explaining why we have the additional
> > discarded align argument.
> > 
> > Also please keep those helpers as they are. You can write an identical inline
> > function in Rust that discards the align argument and calls bindings::krealloc,
> > etc.
> > 
> > For instance:
> > 
> > unsafe extern "C" fn krealloc_align(
> >    ptr: *const c_void,
> >    size: usize,
> > >    _align: c_ulong,
> >    flags: u32,
> > ) -> *mut c_void {
> >    bindings::krealloc(ptr, size, flags)
> > }
> > 
> 
> Ugh. This is indeed a mistake from my side but I don’t quite agree with your variant here too.
> The thing is that the new patchset has a patch #2 which adds kvrealloc_node and realloc_node so this chunk IMO should have looked like
> 
> -rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
> +rust_helper_kvrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
>  {
>  -      return kvrealloc(p, size, flags);
>  +      return kvrealloc_node(p, size, align, flags, NUMA_NO_NODE);
> 
>  }

No for two reasons:

  1) Rust helpers are transparent wrappers for C functions / macros slipping
     through bindgen. We don't add any logic to them, as you do here.

  2) This patch is only about supporting large alignments for VMALLOC. There's
     no need to introduce kvrealloc_node() (yet).

The only thing you want here is to keep the signature common between all realloc
functions. Hence, you want

	unsafe extern "C" fn krealloc_align(
	   ptr: *const c_void,
	   size: usize,
	   _align: c_ulong,
	   flags: u32,
	) -> *mut c_void {
	   bindings::krealloc(ptr, size, flags)
	}

on the Rust side of things. And in the next patch you want

	unsafe extern "C" fn krealloc_node_align(
	   ptr: *const c_void,
	   size: usize,
	   _align: c_ulong,
	   flags: u32,
	   nid: c_int,
	) -> *mut c_void {
	   bindings::krealloc_node(ptr, size, flags, nid)
	}

> …exactly like for vmalloc, see also my comment below.
> 
> >> diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
> >> index 80d34501bbc0..4618c0b79283 100644
> >> --- a/rust/helpers/vmalloc.c
> >> +++ b/rust/helpers/vmalloc.c
> >> @@ -3,7 +3,7 @@
> >> #include <linux/vmalloc.h>
> >> 
> >> void * __must_check __realloc_size(2)
> >> -rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
> >> +rust_helper_vrealloc(const void *p, size_t size, unsigned long align, gfp_t flags)
> >> {
> >> - return vrealloc(p, size, flags);
> >> + return vrealloc_node(p, size, align, flags, NUMA_NO_NODE);
> >> }
> > 
> > Same here, just make this a "real" helper for vrealloc_node() and create a Rust
> > function vrealloc_align() like in the example above.
> 
> Wait, why? What’s the use of vrealloc() if it doesn’t provide the align functionality that we need?

That's fine, then this should be

	void * __must_check __realloc_size(2)
	rust_helper_vrealloc_node(const void *p, size_t size,
				  unsigned long align,
				  gfp_t flags, int nid)
	{
		return vrealloc_node(p, size, align, flags, nid);
	}

and on the Rust side, for this patch, you want:

	unsafe extern "C" fn vrealloc_align(
	   ptr: *const c_void,
	   size: usize,
	   align: c_ulong,
	   flags: u32,
	   _nid: c_int,
	) -> *mut c_void {
	   bindings::vrealloc_node(ptr, size, align, flags, bindings::NUMA_NO_NODE)
	}

The diff between the patches may come out nicer if you do it the other way
around though, i.e. first support node IDs and then support larger alignments
than PAGE_SIZE for VMALLOC.

> > 
> >> diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
> >> index aa2dfa9dca4c..a0d78c497974 100644
> >> --- a/rust/kernel/alloc/allocator.rs
> >> +++ b/rust/kernel/alloc/allocator.rs
> >> @@ -58,7 +58,7 @@ fn aligned_size(new_layout: Layout) -> usize {
> >> ///
> >> /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
> >> struct ReallocFunc(
> >> -    unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
> >> +    unsafe extern "C" fn(*const crate::ffi::c_void, usize, usize, u32) -> *mut crate::ffi::c_void,
> > 
> > Should be c_ulong instead of usize.
> > 
> 
> Noted.
> 
> >> );
> >> 
> >> impl ReallocFunc {
> >> @@ -110,7 +110,7 @@ unsafe fn call(
> >>         // - Those functions provide the guarantees of this function.
> >>         let raw_ptr = unsafe {
> >>             // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
> >> -            self.0(ptr.cast(), size, flags.0).cast()
> >> +            self.0(ptr.cast(), size, layout.align(), flags.0).cast()
> >>         };
> >> 
> >>         let ptr = if size == 0 {
> >> @@ -152,12 +152,6 @@ unsafe fn realloc(
> >>         old_layout: Layout,
> >>         flags: Flags,
> >>     ) -> Result<NonNull<[u8]>, AllocError> {
> >> -        // TODO: Support alignments larger than PAGE_SIZE.
> >> -        if layout.align() > bindings::PAGE_SIZE {
> >> -            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
> >> -            return Err(AllocError);
> >> -        }
> >> -
> >>         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
> >>         // allocated with this `Allocator`.
> >>         unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
> >> @@ -176,12 +170,6 @@ unsafe fn realloc(
> >>         old_layout: Layout,
> >>         flags: Flags,
> >>     ) -> Result<NonNull<[u8]>, AllocError> {
> >> -        // TODO: Support alignments larger than PAGE_SIZE.
> >> -        if layout.align() > bindings::PAGE_SIZE {
> >> -            pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
> >> -            return Err(AllocError);
> >> -        }
> > 
> > Didn't you propose to use VREALLOC if layout.align() > bindings::PAGE_SIZE?
> > 
> 
> I did, and this is what happens on the C side now, please see the #2 patch in series.

I'm fine doing it on the C side if the C side maintainers agree.

However, I don't see you doing it. kvrealloc_node_noprof() does not even have an
align argument AFAICS.

> I think it’s better this way because of uniformity but I don’t have a strong opinion on this.

I agree, but again, I don't think you do it yet. :)

> 
> >>         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
> >>         // allocated with this `Allocator`.
> >>         unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
> > 
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 2/4] mm/slub: allow to set node and align in k[v]realloc
  2025-06-26  8:36 ` [PATCH v4 2/4] mm/slub: allow to set node and align in k[v]realloc Vitaly Wool
@ 2025-06-26 20:53   ` Tamir Duberstein
  2025-06-27 18:10     ` Vitaly Wool
  0 siblings, 1 reply; 11+ messages in thread
From: Tamir Duberstein @ 2025-06-26 20:53 UTC (permalink / raw)
  To: Vitaly Wool
  Cc: linux-mm, akpm, linux-kernel, Uladzislau Rezki, Danilo Krummrich,
	Alice Ryhl, rust-for-linux

On Thu, Jun 26, 2025 at 1:39 AM Vitaly Wool <vitaly.wool@konsulko.se> wrote:
>
> Reimplement k[v]realloc() to be able to set node and alignment
> should a user need to do so. Rename the respective functions to
> k[v]realloc_node() to better match what they actually do now and
> introduce macros for k[v]realloc() for backward compatibility.
>
> With that change we also provide the ability for the Rust part of
> the kernel to set node and alignment in its K[v]xxx [re]allocations.
>
> Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>

Hi Vitaly, there is a typo in the subject line: it should be slab, not slub.

> ---
>  include/linux/slab.h | 12 ++++++++----
>  mm/slub.c            | 33 ++++++++++++++++++++++-----------
>  2 files changed, 30 insertions(+), 15 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index d5a8ab98035c..119f100978c8 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -465,9 +465,11 @@ int kmem_cache_shrink(struct kmem_cache *s);
>  /*
>   * Common kmalloc functions provided by all allocators
>   */
> -void * __must_check krealloc_noprof(const void *objp, size_t new_size,
> -                                   gfp_t flags) __realloc_size(2);
> -#define krealloc(...)                          alloc_hooks(krealloc_noprof(__VA_ARGS__))
> +void * __must_check krealloc_node_noprof(const void *objp, size_t new_size,
> +                                        gfp_t flags, int nid) __realloc_size(2);
> +#define krealloc_node(...)             alloc_hooks(krealloc_node_noprof(__VA_ARGS__))
> +#define krealloc_noprof(o, s, f)       krealloc_node_noprof(o, s, f, NUMA_NO_NODE)
> +#define krealloc(...)                  alloc_hooks(krealloc_noprof(__VA_ARGS__))
>
>  void kfree(const void *objp);
>  void kfree_sensitive(const void *objp);
> @@ -1073,8 +1075,10 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
>  #define kvcalloc_node(...)                     alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
>  #define kvcalloc(...)                          alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
>
> -void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
> +void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
>                 __realloc_size(2);
> +#define kvrealloc_node(...)                    alloc_hooks(kvrealloc_node_noprof(__VA_ARGS__))
> +#define kvrealloc_noprof(p, s, f)              kvrealloc_node_noprof(p, s, f, NUMA_NO_NODE)
>  #define kvrealloc(...)                         alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
>
>  extern void kvfree(const void *addr);
> diff --git a/mm/slub.c b/mm/slub.c
> index c4b64821e680..2d5150d075d5 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4845,7 +4845,7 @@ void kfree(const void *object)
>  EXPORT_SYMBOL(kfree);
>
>  static __always_inline __realloc_size(2) void *
> -__do_krealloc(const void *p, size_t new_size, gfp_t flags)
> +__do_krealloc(const void *p, size_t new_size, gfp_t flags, int nid)
>  {
>         void *ret;
>         size_t ks = 0;
> @@ -4859,6 +4859,15 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>         if (!kasan_check_byte(p))
>                 return NULL;
>
> +       /*
> +        * it is possible to support reallocation with a different nid, but
> +        * it doesn't go well with the concept of krealloc(). Such
> +        * reallocation should be done explicitly instead.
> +        */
> +       if (WARN(nid != NUMA_NO_NODE && nid != page_to_nid(virt_to_page(p)),
> +                               "krealloc() has mismatched nids\n"))
> +               return NULL;
> +
>         if (is_kfence_address(p)) {
>                 ks = orig_size = kfence_ksize(p);
>         } else {
> @@ -4903,7 +4912,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>         return (void *)p;
>
>  alloc_new:
> -       ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
> +       ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
>         if (ret && p) {
>                 /* Disable KASAN checks as the object's redzone is accessed. */
>                 kasan_disable_current();
> @@ -4919,6 +4928,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>   * @p: object to reallocate memory for.
>   * @new_size: how many bytes of memory are required.
>   * @flags: the type of memory to allocate.
> + * @nid: NUMA node or NUMA_NO_NODE
>   *
>   * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
>   * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
> @@ -4947,7 +4957,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>   *
>   * Return: pointer to the allocated memory or %NULL in case of error
>   */
> -void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
> +void *krealloc_node_noprof(const void *p, size_t new_size, gfp_t flags, int nid)
>  {
>         void *ret;
>
> @@ -4956,13 +4966,13 @@ void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
>                 return ZERO_SIZE_PTR;
>         }
>
> -       ret = __do_krealloc(p, new_size, flags);
> +       ret = __do_krealloc(p, new_size, flags, nid);
>         if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
>                 kfree(p);
>
>         return ret;
>  }
> -EXPORT_SYMBOL(krealloc_noprof);
> +EXPORT_SYMBOL(krealloc_node_noprof);
>
>  static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
>  {
> @@ -5079,10 +5089,11 @@ void kvfree_sensitive(const void *addr, size_t len)
>  EXPORT_SYMBOL(kvfree_sensitive);
>
>  /**
> - * kvrealloc - reallocate memory; contents remain unchanged
> + * kvrealloc_node - reallocate memory; contents remain unchanged
>   * @p: object to reallocate memory for
>   * @size: the size to reallocate
>   * @flags: the flags for the page level allocator
> + * @nid: NUMA node id
>   *
>   * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
>   * and @p is not a %NULL pointer, the object pointed to is freed.
> @@ -5100,17 +5111,17 @@ EXPORT_SYMBOL(kvfree_sensitive);
>   *
>   * Return: pointer to the allocated memory or %NULL in case of error
>   */
> -void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
> +void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
>  {
>         void *n;
>
>         if (is_vmalloc_addr(p))
> -               return vrealloc_noprof(p, size, flags);
> +               return vrealloc_node_noprof(p, size, 1, flags, nid);
>
> -       n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
> +       n = krealloc_node_noprof(p, size, kmalloc_gfp_adjust(flags, size), nid);
>         if (!n) {
>                 /* We failed to krealloc(), fall back to kvmalloc(). */
> -               n = kvmalloc_noprof(size, flags);
> +               n = kvmalloc_node_noprof(size, flags, nid);
>                 if (!n)
>                         return NULL;
>
> @@ -5126,7 +5137,7 @@ void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
>
>         return n;
>  }
> -EXPORT_SYMBOL(kvrealloc_noprof);
> +EXPORT_SYMBOL(kvrealloc_node_noprof);
>
>  struct detached_freelist {
>         struct slab *slab;
> --
> 2.39.2
>
>

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 2/4] mm/slub: allow to set node and align in k[v]realloc
  2025-06-26 20:53   ` Tamir Duberstein
@ 2025-06-27 18:10     ` Vitaly Wool
  0 siblings, 0 replies; 11+ messages in thread
From: Vitaly Wool @ 2025-06-27 18:10 UTC (permalink / raw)
  To: Tamir Duberstein
  Cc: linux-mm, akpm, linux-kernel, Uladzislau Rezki, Danilo Krummrich,
	Alice Ryhl, rust-for-linux



> On Jun 26, 2025, at 10:53 PM, Tamir Duberstein <tamird@gmail.com> wrote:
> 
> On Thu, Jun 26, 2025 at 1:39 AM Vitaly Wool <vitaly.wool@konsulko.se> wrote:
>> 
>> Reimplement k[v]realloc() to be able to set node and alignment
>> should a user need to do so. Rename the respective functions to
>> k[v]realloc_node() to better match what they actually do now and
>> introduce macros for k[v]realloc() for backward compatibility.
>> 
>> With that change we also provide the ability for the Rust part of
>> the kernel to set node and alignment in its K[v]xxx [re]allocations.
>> 
>> Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>
> 
> Hi Vitaly, there is a typo in the subject line: it should be slab, not slub.

Thanks, corrected.

> 
>> ---
>> include/linux/slab.h | 12 ++++++++----
>> mm/slub.c            | 33 ++++++++++++++++++++++-----------
>> 2 files changed, 30 insertions(+), 15 deletions(-)
>> 
>> diff --git a/include/linux/slab.h b/include/linux/slab.h
>> index d5a8ab98035c..119f100978c8 100644
>> --- a/include/linux/slab.h
>> +++ b/include/linux/slab.h
>> @@ -465,9 +465,11 @@ int kmem_cache_shrink(struct kmem_cache *s);
>> /*
>>  * Common kmalloc functions provided by all allocators
>>  */
>> -void * __must_check krealloc_noprof(const void *objp, size_t new_size,
>> -                                   gfp_t flags) __realloc_size(2);
>> -#define krealloc(...)                          alloc_hooks(krealloc_noprof(__VA_ARGS__))
>> +void * __must_check krealloc_node_noprof(const void *objp, size_t new_size,
>> +                                        gfp_t flags, int nid) __realloc_size(2);
>> +#define krealloc_node(...)             alloc_hooks(krealloc_node_noprof(__VA_ARGS__))
>> +#define krealloc_noprof(o, s, f)       krealloc_node_noprof(o, s, f, NUMA_NO_NODE)
>> +#define krealloc(...)                  alloc_hooks(krealloc_noprof(__VA_ARGS__))
>> 
>> void kfree(const void *objp);
>> void kfree_sensitive(const void *objp);
>> @@ -1073,8 +1075,10 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
>> #define kvcalloc_node(...)                     alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
>> #define kvcalloc(...)                          alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
>> 
>> -void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
>> +void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
>>                __realloc_size(2);
>> +#define kvrealloc_node(...)                    alloc_hooks(kvrealloc_node_noprof(__VA_ARGS__))
>> +#define kvrealloc_noprof(p, s, f)              kvrealloc_node_noprof(p, s, f, NUMA_NO_NODE)
>> #define kvrealloc(...)                         alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
>> 
>> extern void kvfree(const void *addr);
>> diff --git a/mm/slub.c b/mm/slub.c
>> index c4b64821e680..2d5150d075d5 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -4845,7 +4845,7 @@ void kfree(const void *object)
>> EXPORT_SYMBOL(kfree);
>> 
>> static __always_inline __realloc_size(2) void *
>> -__do_krealloc(const void *p, size_t new_size, gfp_t flags)
>> +__do_krealloc(const void *p, size_t new_size, gfp_t flags, int nid)
>> {
>>        void *ret;
>>        size_t ks = 0;
>> @@ -4859,6 +4859,15 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>>        if (!kasan_check_byte(p))
>>                return NULL;
>> 
>> +       /*
>> +        * it is possible to support reallocation with a different nid, but
>> +        * it doesn't go well with the concept of krealloc(). Such
>> +        * reallocation should be done explicitly instead.
>> +        */
>> +       if (WARN(nid != NUMA_NO_NODE && nid != page_to_nid(virt_to_page(p)),
>> +                               "krealloc() has mismatched nids\n"))
>> +               return NULL;
>> +
>>        if (is_kfence_address(p)) {
>>                ks = orig_size = kfence_ksize(p);
>>        } else {
>> @@ -4903,7 +4912,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>>        return (void *)p;
>> 
>> alloc_new:
>> -       ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
>> +       ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
>>        if (ret && p) {
>>                /* Disable KASAN checks as the object's redzone is accessed. */
>>                kasan_disable_current();
>> @@ -4919,6 +4928,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>>  * @p: object to reallocate memory for.
>>  * @new_size: how many bytes of memory are required.
>>  * @flags: the type of memory to allocate.
>> + * @nid: NUMA node or NUMA_NO_NODE
>>  *
>>  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
>>  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
>> @@ -4947,7 +4957,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>>  *
>>  * Return: pointer to the allocated memory or %NULL in case of error
>>  */
>> -void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
>> +void *krealloc_node_noprof(const void *p, size_t new_size, gfp_t flags, int nid)
>> {
>>        void *ret;
>> 
>> @@ -4956,13 +4966,13 @@ void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
>>                return ZERO_SIZE_PTR;
>>        }
>> 
>> -       ret = __do_krealloc(p, new_size, flags);
>> +       ret = __do_krealloc(p, new_size, flags, nid);
>>        if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
>>                kfree(p);
>> 
>>        return ret;
>> }
>> -EXPORT_SYMBOL(krealloc_noprof);
>> +EXPORT_SYMBOL(krealloc_node_noprof);
>> 
>> static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
>> {
>> @@ -5079,10 +5089,11 @@ void kvfree_sensitive(const void *addr, size_t len)
>> EXPORT_SYMBOL(kvfree_sensitive);
>> 
>> /**
>> - * kvrealloc - reallocate memory; contents remain unchanged
>> + * kvrealloc_node - reallocate memory; contents remain unchanged
>>  * @p: object to reallocate memory for
>>  * @size: the size to reallocate
>>  * @flags: the flags for the page level allocator
>> + * @nid: NUMA node id
>>  *
>>  * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
>>  * and @p is not a %NULL pointer, the object pointed to is freed.
>> @@ -5100,17 +5111,17 @@ EXPORT_SYMBOL(kvfree_sensitive);
>>  *
>>  * Return: pointer to the allocated memory or %NULL in case of error
>>  */
>> -void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
>> +void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
>> {
>>        void *n;
>> 
>>        if (is_vmalloc_addr(p))
>> -               return vrealloc_noprof(p, size, flags);
>> +               return vrealloc_node_noprof(p, size, 1, flags, nid);
>> 
>> -       n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
>> +       n = krealloc_node_noprof(p, size, kmalloc_gfp_adjust(flags, size), nid);
>>        if (!n) {
>>                /* We failed to krealloc(), fall back to kvmalloc(). */
>> -               n = kvmalloc_noprof(size, flags);
>> +               n = kvmalloc_node_noprof(size, flags, nid);
>>                if (!n)
>>                        return NULL;
>> 
>> @@ -5126,7 +5137,7 @@ void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
>> 
>>        return n;
>> }
>> -EXPORT_SYMBOL(kvrealloc_noprof);
>> +EXPORT_SYMBOL(kvrealloc_node_noprof);
>> 
>> struct detached_freelist {
>>        struct slab *slab;
>> --
>> 2.39.2
>> 
>> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2025-06-27 18:10 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-06-26  8:35 [PATCH v4 0/4] support large align and nid in Rust allocators Vitaly Wool
2025-06-26  8:36 ` [PATCH v4 1/4] mm/vmalloc: allow to set node and align in vrealloc Vitaly Wool
2025-06-26  8:36 ` [PATCH v4 2/4] mm/slub: allow to set node and align in k[v]realloc Vitaly Wool
2025-06-26 20:53   ` Tamir Duberstein
2025-06-27 18:10     ` Vitaly Wool
2025-06-26  8:36 ` [PATCH v4 3/4] rust: support large alignments in allocations Vitaly Wool
2025-06-26 12:36   ` Danilo Krummrich
2025-06-26 16:29     ` Vitaly Wool
2025-06-26 17:58       ` Danilo Krummrich
2025-06-26  8:36 ` [PATCH v4 4/4] rust: support NUMA ids " Vitaly Wool
2025-06-26 12:40   ` Danilo Krummrich

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).