* [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE
@ 2025-07-15  9:59 Hui Zhu
  2025-07-15  9:59 ` [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages Hui Zhu
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Hui Zhu @ 2025-07-15  9:59 UTC (permalink / raw)
  To: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Danilo Krummrich, Geliang Tang, Hui Zhu,
	linux-kernel, linux-mm, rust-for-linux

From: Hui Zhu <zhuhui@kylinos.cn>

Vmalloc::realloc() has a TODO: "Support alignments larger than PAGE_SIZE."

This series makes the Vmalloc allocator support alignments larger than
PAGE_SIZE.
A new function, vrealloc_align(), is added to vmalloc.c to reallocate
vmap pages with a requested alignment.
When Vmalloc::realloc needs to reallocate memory with an alignment beyond
PAGE_SIZE, it uses vrealloc_align() instead of vrealloc(), which enables
alignments larger than PAGE_SIZE.
A sample module demonstrating memory allocator usage is added to samples/rust.
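
To illustrate what this enables, here is a minimal sketch (not part of the
series) of an over-aligned vmalloc allocation; it reuses the Allocator trait
calls exercised by the sample in patch 3, and the sizes are illustrative:

    use core::alloc::Layout;
    use kernel::alloc::{allocator, Allocator};
    use kernel::bindings;
    use kernel::prelude::*;

    // Allocate and free a vmalloc buffer aligned beyond PAGE_SIZE.
    fn over_aligned_vmalloc() -> Result {
        // Four pages, aligned to 2 * PAGE_SIZE. Before this series,
        // Vmalloc::realloc() rejected any alignment larger than PAGE_SIZE.
        let layout =
            Layout::from_size_align(bindings::PAGE_SIZE * 4, bindings::PAGE_SIZE * 2)
                .map_err(|_| EINVAL)?;
        let ptr = <allocator::Vmalloc as Allocator>::alloc(layout, GFP_KERNEL)
            .map_err(|_| ENOMEM)?;

        // ... use the memory ...

        // SAFETY: `ptr` was returned by `Vmalloc::alloc()` above with `layout`.
        unsafe { <allocator::Vmalloc as Allocator>::free(ptr.cast::<u8>(), layout) };
        Ok(())
    }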

Hui Zhu (3):
  vmalloc: Add vrealloc_align to support allocation of aligned vmap
    pages
  rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE
  rust: add a sample allocator usage

 include/linux/vmalloc.h        |   5 ++
 mm/vmalloc.c                   |  80 ++++++++++++++++---------
 rust/helpers/vmalloc.c         |   7 +++
 rust/kernel/alloc/allocator.rs |  32 ++++++----
 samples/rust/Kconfig           |  10 ++++
 samples/rust/Makefile          |   1 +
 samples/rust/rust_allocator.rs | 104 +++++++++++++++++++++++++++++++++
 7 files changed, 199 insertions(+), 40 deletions(-)
 create mode 100644 samples/rust/rust_allocator.rs

-- 
2.43.0




* [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages
  2025-07-15  9:59 [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Hui Zhu
@ 2025-07-15  9:59 ` Hui Zhu
  2025-07-15 23:19   ` kernel test robot
  2025-07-16  7:02   ` Uladzislau Rezki
  2025-07-15  9:59 ` [PATCH 2/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Hui Zhu
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 9+ messages in thread
From: Hui Zhu @ 2025-07-15  9:59 UTC (permalink / raw)
  To: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Danilo Krummrich, Geliang Tang, Hui Zhu,
	linux-kernel, linux-mm, rust-for-linux

From: Hui Zhu <zhuhui@kylinos.cn>

Add a new function, vrealloc_align(), which supports allocation of aligned
vmap pages via __vmalloc_node_noprof().
vrealloc_align() also checks the old address: if it does not satisfy the
requested alignment, the old vmap pages are released and new vmap pages
meeting the alignment are allocated, with the old contents copied over.

Co-developed-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
 include/linux/vmalloc.h |  5 +++
 mm/vmalloc.c            | 80 ++++++++++++++++++++++++++---------------
 2 files changed, 57 insertions(+), 28 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index fdc9aeb74a44..0ce0c1ea2427 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -201,6 +201,11 @@ void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 		__realloc_size(2);
 #define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))
 
+void * __must_check vrealloc_align_noprof(const void *p, size_t size,
+					  size_t align, gfp_t flags)
+		__realloc_size(2);
+#define vrealloc_align(...)	alloc_hooks(vrealloc_align_noprof(__VA_ARGS__))
+
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ab986dd09b6a..41cb3603b3cc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4081,9 +4081,11 @@ void *vzalloc_node_noprof(unsigned long size, int node)
 EXPORT_SYMBOL(vzalloc_node_noprof);
 
 /**
- * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
+ * vrealloc_align - reallocate virtually contiguous memory;
+ *                  contents remain unchanged
  * @p: object to reallocate memory for
  * @size: the size to reallocate
+ * @align: requested alignment
  * @flags: the flags for the page level allocator
  *
  * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
@@ -4103,7 +4105,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
  * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
  *         failure
  */
-void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *vrealloc_align_noprof(const void *p, size_t size, size_t align,
+			    gfp_t flags)
 {
 	struct vm_struct *vm = NULL;
 	size_t alloced_size = 0;
@@ -4116,49 +4119,65 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	}
 
 	if (p) {
+		if (!is_power_of_2(align)) {
+			WARN(1, "Trying to vrealloc_align() align is not power of 2 (%ld)\n",
+			     align);
+			return NULL;
+		}
+
 		vm = find_vm_area(p);
 		if (unlikely(!vm)) {
-			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
+			WARN(1, "Trying to vrealloc_align() nonexistent vm area (%p)\n", p);
 			return NULL;
 		}
 
 		alloced_size = get_vm_area_size(vm);
 		old_size = vm->requested_size;
 		if (WARN(alloced_size < old_size,
-			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+			 "vrealloc_align() has mismatched area vs requested sizes (%p)\n", p))
 			return NULL;
 	}
 
-	/*
-	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
-	 * would be a good heuristic for when to shrink the vm_area?
-	 */
-	if (size <= old_size) {
-		/* Zero out "freed" memory, potentially for future realloc. */
-		if (want_init_on_free() || want_init_on_alloc(flags))
-			memset((void *)p + size, 0, old_size - size);
-		vm->requested_size = size;
-		kasan_poison_vmalloc(p + size, old_size - size);
-		return (void *)p;
-	}
+	if (IS_ALIGNED((unsigned long)p, align)) {
+		/*
+		 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
+		 * would be a good heuristic for when to shrink the vm_area?
+		 */
+		if (size <= old_size) {
+			/* Zero out "freed" memory, potentially for future realloc. */
+			if (want_init_on_free() || want_init_on_alloc(flags))
+				memset((void *)p + size, 0, old_size - size);
+			vm->requested_size = size;
+			kasan_poison_vmalloc(p + size, old_size - size);
+			return (void *)p;
+		}
 
-	/*
-	 * We already have the bytes available in the allocation; use them.
-	 */
-	if (size <= alloced_size) {
-		kasan_unpoison_vmalloc(p + old_size, size - old_size,
-				       KASAN_VMALLOC_PROT_NORMAL);
 		/*
-		 * No need to zero memory here, as unused memory will have
-		 * already been zeroed at initial allocation time or during
-		 * realloc shrink time.
+		 * We already have the bytes available in the allocation; use them.
+		 */
+		if (size <= alloced_size) {
+			kasan_unpoison_vmalloc(p + old_size, size - old_size,
+					KASAN_VMALLOC_PROT_NORMAL);
+			/*
+			 * No need to zero memory here, as unused memory will have
+			 * already been zeroed at initial allocation time or during
+			 * realloc shrink time.
+			 */
+			vm->requested_size = size;
+			return (void *)p;
+		}
+	} else {
+		/*
+		 * p is not aligned with align.
+		 * Allocate a new address to handle it.
 		 */
-		vm->requested_size = size;
-		return (void *)p;
+		if (size < old_size)
+			old_size = size;
 	}
 
 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
-	n = __vmalloc_noprof(size, flags);
+	n = __vmalloc_node_noprof(size, align, flags, NUMA_NO_NODE,
+				  __builtin_return_address(0));
 	if (!n)
 		return NULL;
 
@@ -4170,6 +4189,11 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	return n;
 }
 
+void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+{
+	return vrealloc_align_noprof(p, size, 1, flags);
+}
+
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
-- 
2.43.0




* [PATCH 2/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE
  2025-07-15  9:59 [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Hui Zhu
  2025-07-15  9:59 ` [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages Hui Zhu
@ 2025-07-15  9:59 ` Hui Zhu
  2025-07-15  9:59 ` [PATCH 3/3] rust: add a sample allocator usage Hui Zhu
  2025-07-15 10:21 ` [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Danilo Krummrich
  3 siblings, 0 replies; 9+ messages in thread
From: Hui Zhu @ 2025-07-15  9:59 UTC (permalink / raw)
  To: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Danilo Krummrich, Geliang Tang, Hui Zhu,
	linux-kernel, linux-mm, rust-for-linux

From: Hui Zhu <zhuhui@kylinos.cn>

Make the Rust Vmalloc allocator support alignments larger than PAGE_SIZE.

A new optional field is added to ReallocFunc: an allocator that supports
aligned reallocation can register its alignment-aware realloc function there.
The VREALLOC constant sets this field to bindings::vrealloc_align.
When ReallocFunc::call() runs, if an aligned realloc function is registered
and the requested alignment exceeds PAGE_SIZE, that function is used;
otherwise the plain realloc function is called as before.

Co-developed-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
 rust/helpers/vmalloc.c         |  7 +++++++
 rust/kernel/alloc/allocator.rs | 32 ++++++++++++++++++++------------
 2 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
index 80d34501bbc0..3290c4c4c42f 100644
--- a/rust/helpers/vmalloc.c
+++ b/rust/helpers/vmalloc.c
@@ -7,3 +7,10 @@ rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
 {
 	return vrealloc(p, size, flags);
 }
+
+void * __must_check __realloc_size(2)
+rust_helper_vrealloc_align(const void *p, size_t size, size_t align,
+			   gfp_t flags)
+{
+	return vrealloc_align(p, size, align, flags);
+}
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index aa2dfa9dca4c..197222e15c26 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -59,17 +59,25 @@ fn aligned_size(new_layout: Layout) -> usize {
 /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
 struct ReallocFunc(
     unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
+    Option<
+        unsafe extern "C" fn(
+            *const crate::ffi::c_void,
+            usize,
+            usize,
+            u32,
+        ) -> *mut crate::ffi::c_void,
+    >,
 );
 
 impl ReallocFunc {
     // INVARIANT: `krealloc` satisfies the type invariants.
-    const KREALLOC: Self = Self(bindings::krealloc);
+    const KREALLOC: Self = Self(bindings::krealloc, None);
 
     // INVARIANT: `vrealloc` satisfies the type invariants.
-    const VREALLOC: Self = Self(bindings::vrealloc);
+    const VREALLOC: Self = Self(bindings::vrealloc, Some(bindings::vrealloc_align));
 
     // INVARIANT: `kvrealloc` satisfies the type invariants.
-    const KVREALLOC: Self = Self(bindings::kvrealloc);
+    const KVREALLOC: Self = Self(bindings::kvrealloc, None);
 
     /// # Safety
     ///
@@ -108,9 +116,15 @@ unsafe fn call(
         // GUARANTEE:
         // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc`.
         // - Those functions provide the guarantees of this function.
-        let raw_ptr = unsafe {
-            // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
-            self.0(ptr.cast(), size, flags.0).cast()
+        // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
+        let raw_ptr = if let Some(f) = self.1 {
+            if layout.align() > bindings::PAGE_SIZE {
+                unsafe { f(ptr.cast(), size, layout.align(), flags.0).cast() }
+            } else {
+                unsafe { self.0(ptr.cast(), size, flags.0).cast() }
+            }
+        } else {
+            unsafe { self.0(ptr.cast(), size, flags.0).cast() }
         };
 
         let ptr = if size == 0 {
@@ -152,12 +166,6 @@ unsafe fn realloc(
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
-        // TODO: Support alignments larger than PAGE_SIZE.
-        if layout.align() > bindings::PAGE_SIZE {
-            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
-            return Err(AllocError);
-        }
-
         // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
         // allocated with this `Allocator`.
         unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
-- 
2.43.0




* [PATCH 3/3] rust: add a sample allocator usage
  2025-07-15  9:59 [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Hui Zhu
  2025-07-15  9:59 ` [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages Hui Zhu
  2025-07-15  9:59 ` [PATCH 2/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Hui Zhu
@ 2025-07-15  9:59 ` Hui Zhu
  2025-07-15 10:37   ` Danilo Krummrich
  2025-07-15 10:21 ` [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Danilo Krummrich
  3 siblings, 1 reply; 9+ messages in thread
From: Hui Zhu @ 2025-07-15  9:59 UTC (permalink / raw)
  To: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Danilo Krummrich, Geliang Tang, Hui Zhu,
	linux-kernel, linux-mm, rust-for-linux

From: Hui Zhu <zhuhui@kylinos.cn>

Add a sample module demonstrating memory allocator usage to samples/rust.

Co-developed-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
 samples/rust/Kconfig           |  10 ++++
 samples/rust/Makefile          |   1 +
 samples/rust/rust_allocator.rs | 104 +++++++++++++++++++++++++++++++++
 3 files changed, 115 insertions(+)
 create mode 100644 samples/rust/rust_allocator.rs

diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
index 7f7371a004ee..79c73f6c5216 100644
--- a/samples/rust/Kconfig
+++ b/samples/rust/Kconfig
@@ -105,6 +105,16 @@ config SAMPLE_RUST_DRIVER_AUXILIARY
 
 	  If unsure, say N.
 
+config SAMPLE_RUST_ALLOCATOR
+	tristate "Allocator Test Driver"
+	help
+	  This option builds the Rust allocator test driver sample.
+
+	  To compile this as a module, choose M here:
+	  the module will be called rust_allocator.
+
+	  If unsure, say N.
+
 config SAMPLE_RUST_HOSTPROGS
 	bool "Host programs"
 	help
diff --git a/samples/rust/Makefile b/samples/rust/Makefile
index bd2faad63b4f..b378959eab19 100644
--- a/samples/rust/Makefile
+++ b/samples/rust/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SAMPLE_RUST_DRIVER_PLATFORM)	+= rust_driver_platform.o
 obj-$(CONFIG_SAMPLE_RUST_DRIVER_FAUX)		+= rust_driver_faux.o
 obj-$(CONFIG_SAMPLE_RUST_DRIVER_AUXILIARY)	+= rust_driver_auxiliary.o
 obj-$(CONFIG_SAMPLE_RUST_CONFIGFS)		+= rust_configfs.o
+obj-$(CONFIG_SAMPLE_RUST_ALLOCATOR)		+= rust_allocator.o
 
 rust_print-y := rust_print_main.o rust_print_events.o
 
diff --git a/samples/rust/rust_allocator.rs b/samples/rust/rust_allocator.rs
new file mode 100644
index 000000000000..13d23cc9d682
--- /dev/null
+++ b/samples/rust/rust_allocator.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (c) 2025, Kylin Software
+
+//! Rust allocator sample.
+
+use core::{alloc::Layout, ptr::NonNull};
+use kernel::alloc::allocator;
+use kernel::alloc::Allocator;
+use kernel::bindings;
+use kernel::prelude::*;
+
+module! {
+    type: RustAllocator,
+    name: "rust_allocator",
+    authors: ["Rust for Linux Contributors"],
+    description: "Rust allocator sample",
+    license: "GPL",
+}
+
+const VMALLOC_ARG: [(usize, usize); 2] = [
+    (bindings::PAGE_SIZE * 4, bindings::PAGE_SIZE * 2),
+    (1024, 128),
+];
+
+struct RustAllocator {
+    vmalloc_vec: KVec<(usize, Layout)>,
+}
+
+fn vmalloc_align(size: usize, align: usize) -> Result<(NonNull<[u8]>, Layout)> {
+    let layout = Layout::from_size_align(size, align).map_err(|_| EINVAL)?;
+
+    Ok((
+        <allocator::Vmalloc as Allocator>::alloc(layout, GFP_KERNEL).map_err(|_| EINVAL)?,
+        layout,
+    ))
+}
+
+fn vfree(addr: usize, layout: Layout) {
+    let vmalloc_ptr = NonNull::new(addr as *mut u8);
+    if let Some(ptr) = vmalloc_ptr {
+        unsafe {
+            <allocator::Vmalloc as Allocator>::free(ptr, layout);
+        }
+    } else {
+        pr_err!("Failed to vfree: pointer is null\n");
+    }
+}
+
+fn check_ptr(ptr: NonNull<[u8]>, size: usize, align: usize) -> (usize, bool) {
+    let current_size = unsafe { ptr.as_ref().len() };
+    if current_size != size {
+        pr_err!(
+            "The length to be allocated is {}, and the actually allocated memory length is {}.\n",
+            size,
+            current_size
+        );
+        return (0, false);
+    }
+
+    let addr = ptr.cast::<u8>().as_ptr() as usize;
+    debug_assert!(align.is_power_of_two());
+    if addr & (align - 1) != 0 {
+        pr_err!("Address {:#x} is not aligned with {:#x}.\n", addr, align);
+        return (0, false);
+    }
+
+    (addr, true)
+}
+
+fn clear_vmalloc_vec(v: &KVec<(usize, Layout)>) {
+    for (addr, layout) in v {
+        vfree(*addr, *layout);
+    }
+}
+
+impl kernel::Module for RustAllocator {
+    fn init(_module: &'static ThisModule) -> Result<Self> {
+        pr_info!("Rust allocator sample (init)\n");
+
+        let mut vmalloc_vec = KVec::new();
+        for (size, align) in VMALLOC_ARG {
+            let (ptr, layout) = vmalloc_align(size, align)?;
+
+            let (addr, is_ok) = check_ptr(ptr, size, align);
+            if !is_ok {
+                clear_vmalloc_vec(&vmalloc_vec);
+                return Err(EINVAL);
+            }
+
+            vmalloc_vec.push((addr, layout), GFP_KERNEL)?;
+        }
+
+        Ok(RustAllocator { vmalloc_vec })
+    }
+}
+
+impl Drop for RustAllocator {
+    fn drop(&mut self) {
+        pr_info!("Rust allocator sample (exit)\n");
+
+        clear_vmalloc_vec(&self.vmalloc_vec);
+    }
+}
-- 
2.43.0




* Re: [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE
  2025-07-15  9:59 [PATCH 0/3] rust: allocator: Vmalloc: Support alignments larger than PAGE_SIZE Hui Zhu
                   ` (2 preceding siblings ...)
  2025-07-15  9:59 ` [PATCH 3/3] rust: add a sample allocator usage Hui Zhu
@ 2025-07-15 10:21 ` Danilo Krummrich
  3 siblings, 0 replies; 9+ messages in thread
From: Danilo Krummrich @ 2025-07-15 10:21 UTC (permalink / raw)
  To: Hui Zhu
  Cc: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Geliang Tang, Hui Zhu, linux-kernel,
	linux-mm, rust-for-linux

Hi Hui,

On Tue Jul 15, 2025 at 11:59 AM CEST, Hui Zhu wrote:
> From: Hui Zhu <zhuhui@kylinos.cn>
>
> There is a TODO in Vmalloc::realloc "Support alignments larger than
> PAGE_SIZE."
>
> These commits make allocator vmalloc support alignments larger than
> PAGE_SIZE.
> The function vrealloc_align is added to vmalloc.c to support reallocating
> aligned vmap pages.
> When Vmalloc::realloc intends to reallocate memory aligned beyond PAGE_SIZE,
> vrealloc_align should be used instead of vrealloc, thus enabling support
> for alignments larger than PAGE_SIZE.

Thanks for the patch!

Please note that there is already a rather progressed patch series addressing
this [1].

However, I'd be interested in your use-case for alignments larger than
PAGE_SIZE. :)

> And add a sample to the samples memory allocator usage.

The example looks a bit odd; more on that in the corresponding patch.

We already have other examples in the form of documentation tests [2]. Not all
of them are for VVec; some are for KVec and KVVec. However, they all share the
same code, only the allocator backend differs.

If you'd like to introduce additional examples, please do so in the context of
documentation tests in rust/kernel/alloc/kvec.rs.

If you enable CONFIG_RUST_KERNEL_DOCTESTS, they're compiled and executed at
boot time.
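
A doctest for VVec could look roughly like this (only a sketch; it assumes
VVec exposes the same new()/push() interface as the KVec used in your sample):

    /// ```
    /// let mut v = VVec::new();
    /// v.push(1u32, GFP_KERNEL)?;
    /// assert_eq!(v.len(), 1);
    /// # Ok::<(), Error>(())
    /// ```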

[1] https://lore.kernel.org/lkml/20250709172345.1031907-1-vitaly.wool@konsulko.se/
[2] https://rust.docs.kernel.org/kernel/alloc/kvec/type.VVec.html



* Re: [PATCH 3/3] rust: add a sample allocator usage
  2025-07-15  9:59 ` [PATCH 3/3] rust: add a sample allocator usage Hui Zhu
@ 2025-07-15 10:37   ` Danilo Krummrich
  2025-07-17 10:02     ` Your Name
  0 siblings, 1 reply; 9+ messages in thread
From: Danilo Krummrich @ 2025-07-15 10:37 UTC (permalink / raw)
  To: Hui Zhu
  Cc: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Geliang Tang, Hui Zhu, linux-kernel,
	linux-mm, rust-for-linux

On Tue Jul 15, 2025 at 11:59 AM CEST, Hui Zhu wrote:
> +impl kernel::Module for RustAllocator {
> +    fn init(_module: &'static ThisModule) -> Result<Self> {
> +        pr_info!("Rust allocator sample (init)\n");
> +
> +        let mut vmalloc_vec = KVec::new();
> +        for (size, align) in VMALLOC_ARG {
> +            let (ptr, layout) = vmalloc_align(size, align)?;

Ok, I think I get the idea, you want to demonstrate how to use the Allocator
trait for raw memory allocations.

However, doing so is discouraged unless there's really no other way. One obvious
example is Rust's own memory allocation primitives, such as Box and Vec.

So, instead of this raw allocation, you can just use VBox::new() or
VBox::new_uninit() in the following way.

	[repr(align(ALIGN))]
	struct Blob([u8; SIZE]);

	// Creates a vmalloc allocation of size `SIZE` with an alignment of
	// `ALIGN`. The allocation is freed once `b` is dropped.
	let b = VBox::<Blob>::new_uninit(GFP_KERNEL)?;

This way you don't have to handle the layout and the Allocator type yourself, and
you also don't have to care about explicitly calling vfree(); VBox does all this
for you.

> +
> +            let (addr, is_ok) = check_ptr(ptr, size, align);
> +            if !is_ok {
> +                clear_vmalloc_vec(&vmalloc_vec);
> +                return Err(EINVAL);
> +            }
> +
> +            vmalloc_vec.push((addr, layout), GFP_KERNEL)?;
> +        }
> +
> +        Ok(RustAllocator { vmalloc_vec })
> +    }
> +}



* Re: [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages
  2025-07-15  9:59 ` [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages Hui Zhu
@ 2025-07-15 23:19   ` kernel test robot
  2025-07-16  7:02   ` Uladzislau Rezki
  1 sibling, 0 replies; 9+ messages in thread
From: kernel test robot @ 2025-07-15 23:19 UTC (permalink / raw)
  To: Hui Zhu, Andrew Morton, Uladzislau Rezki, Miguel Ojeda,
	Alex Gaynor, Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin,
	Andreas Hindborg, Alice Ryhl, Trevor Gross, Danilo Krummrich,
	Geliang Tang, Hui Zhu, linux-kernel, rust-for-linux
  Cc: llvm, oe-kbuild-all, Linux Memory Management List

Hi Hui,

kernel test robot noticed the following build warnings:

[auto build test WARNING on rust/rust-next]
[also build test WARNING on akpm-mm/mm-everything rust/alloc-next linus/master v6.16-rc6 next-20250715]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Hui-Zhu/vmalloc-Add-vrealloc_align-to-support-allocation-of-aligned-vmap-pages/20250715-180136
base:   https://github.com/Rust-for-Linux/linux rust-next
patch link:    https://lore.kernel.org/r/81647cce3b8e7139af47f20dbeba184b7a89b0cc.1752573305.git.zhuhui%40kylinos.cn
patch subject: [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages
config: i386-buildonly-randconfig-002-20250716 (https://download.01.org/0day-ci/archive/20250716/202507160708.jArplInK-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250716/202507160708.jArplInK-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507160708.jArplInK-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> mm/vmalloc.c:4124:9: warning: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat]
    4123 |                         WARN(1, "Trying to vrealloc_align() align is not power of 2 (%ld)\n",
         |                                                                                      ~~~
         |                                                                                      %zu
    4124 |                              align);
         |                              ^~~~~
   include/asm-generic/bug.h:134:29: note: expanded from macro 'WARN'
     134 |                 __WARN_printf(TAINT_WARN, format);                      \
         |                                           ^~~~~~
   include/asm-generic/bug.h:106:17: note: expanded from macro '__WARN_printf'
     106 |                 __warn_printk(arg);                                     \
         |                               ^~~
   mm/vmalloc.c:1987:20: warning: unused function 'setup_vmalloc_vm' [-Wunused-function]
    1987 | static inline void setup_vmalloc_vm(struct vm_struct *vm,
         |                    ^~~~~~~~~~~~~~~~
   2 warnings generated.


vim +4124 mm/vmalloc.c

  4082	
  4083	/**
  4084	 * vrealloc_align - reallocate virtually contiguous memory;
  4085	 *                  contents remain unchanged
  4086	 * @p: object to reallocate memory for
  4087	 * @size: the size to reallocate
  4088	 * @align: requested alignment
  4089	 * @flags: the flags for the page level allocator
  4090	 *
  4091	 * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
  4092	 * @p is not a %NULL pointer, the object pointed to is freed.
  4093	 *
  4094	 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
  4095	 * initial memory allocation, every subsequent call to this API for the same
  4096	 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
  4097	 * __GFP_ZERO is not fully honored by this API.
  4098	 *
  4099	 * In any case, the contents of the object pointed to are preserved up to the
  4100	 * lesser of the new and old sizes.
  4101	 *
  4102	 * This function must not be called concurrently with itself or vfree() for the
  4103	 * same memory allocation.
  4104	 *
  4105	 * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
  4106	 *         failure
  4107	 */
  4108	void *vrealloc_align_noprof(const void *p, size_t size, size_t align,
  4109				    gfp_t flags)
  4110	{
  4111		struct vm_struct *vm = NULL;
  4112		size_t alloced_size = 0;
  4113		size_t old_size = 0;
  4114		void *n;
  4115	
  4116		if (!size) {
  4117			vfree(p);
  4118			return NULL;
  4119		}
  4120	
  4121		if (p) {
  4122			if (!is_power_of_2(align)) {
  4123				WARN(1, "Trying to vrealloc_align() align is not power of 2 (%ld)\n",
> 4124				     align);
  4125				return NULL;
  4126			}
  4127	
  4128			vm = find_vm_area(p);
  4129			if (unlikely(!vm)) {
  4130				WARN(1, "Trying to vrealloc_align() nonexistent vm area (%p)\n", p);
  4131				return NULL;
  4132			}
  4133	
  4134			alloced_size = get_vm_area_size(vm);
  4135			old_size = vm->requested_size;
  4136			if (WARN(alloced_size < old_size,
  4137				 "vrealloc_align() has mismatched area vs requested sizes (%p)\n", p))
  4138				return NULL;
  4139		}
  4140	
  4141		if (IS_ALIGNED((unsigned long)p, align)) {
  4142			/*
  4143			 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
  4144			 * would be a good heuristic for when to shrink the vm_area?
  4145			 */
  4146			if (size <= old_size) {
  4147				/* Zero out "freed" memory, potentially for future realloc. */
  4148				if (want_init_on_free() || want_init_on_alloc(flags))
  4149					memset((void *)p + size, 0, old_size - size);
  4150				vm->requested_size = size;
  4151				kasan_poison_vmalloc(p + size, old_size - size);
  4152				return (void *)p;
  4153			}
  4154	
  4155			/*
  4156			 * We already have the bytes available in the allocation; use them.
  4157			 */
  4158			if (size <= alloced_size) {
  4159				kasan_unpoison_vmalloc(p + old_size, size - old_size,
  4160						KASAN_VMALLOC_PROT_NORMAL);
  4161				/*
  4162				 * No need to zero memory here, as unused memory will have
  4163				 * already been zeroed at initial allocation time or during
  4164				 * realloc shrink time.
  4165				 */
  4166				vm->requested_size = size;
  4167				return (void *)p;
  4168			}
  4169		} else {
  4170			/*
  4171			 * p is not aligned with align.
  4172			 * Allocate a new address to handle it.
  4173			 */
  4174			if (size < old_size)
  4175				old_size = size;
  4176		}
  4177	
  4178		/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
  4179		n = __vmalloc_node_noprof(size, align, flags, NUMA_NO_NODE,
  4180					  __builtin_return_address(0));
  4181		if (!n)
  4182			return NULL;
  4183	
  4184		if (p) {
  4185			memcpy(n, p, old_size);
  4186			vfree(p);
  4187		}
  4188	
  4189		return n;
  4190	}
  4191	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki



* Re: [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages
  2025-07-15  9:59 ` [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages Hui Zhu
  2025-07-15 23:19   ` kernel test robot
@ 2025-07-16  7:02   ` Uladzislau Rezki
  1 sibling, 0 replies; 9+ messages in thread
From: Uladzislau Rezki @ 2025-07-16  7:02 UTC (permalink / raw)
  To: Hui Zhu
  Cc: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Danilo Krummrich, Geliang Tang, Hui Zhu,
	linux-kernel, linux-mm, rust-for-linux

On Tue, Jul 15, 2025 at 05:59:46PM +0800, Hui Zhu wrote:
> From: Hui Zhu <zhuhui@kylinos.cn>
> 
> This commit add new function vrealloc_align.
> vrealloc_align support allocation of aligned vmap pages with
> __vmalloc_node_noprof.
> And vrealloc_align will check the old address. If this address does
> not meet the current alignment requirements, it will also release
> the old vmap pages and reallocate new vmap pages that satisfy the
> alignment requirements.
> 
> Co-developed-by: Geliang Tang <geliang@kernel.org>
> Signed-off-by: Geliang Tang <geliang@kernel.org>
> Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
> ---
>  include/linux/vmalloc.h |  5 +++
>  mm/vmalloc.c            | 80 ++++++++++++++++++++++++++---------------
>  2 files changed, 57 insertions(+), 28 deletions(-)
> 
> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
> index fdc9aeb74a44..0ce0c1ea2427 100644
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -201,6 +201,11 @@ void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
>  		__realloc_size(2);
>  #define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))
>  
> +void * __must_check vrealloc_align_noprof(const void *p, size_t size,
> +					  size_t align, gfp_t flags)
> +		__realloc_size(2);
> +#define vrealloc_align(...)	alloc_hooks(vrealloc_align_noprof(__VA_ARGS__))
> +
>  extern void vfree(const void *addr);
>  extern void vfree_atomic(const void *addr);
>  
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index ab986dd09b6a..41cb3603b3cc 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -4081,9 +4081,11 @@ void *vzalloc_node_noprof(unsigned long size, int node)
>  EXPORT_SYMBOL(vzalloc_node_noprof);
>  
>  /**
> - * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
> + * vrealloc_align - reallocate virtually contiguous memory;
> + *                  contents remain unchanged
>   * @p: object to reallocate memory for
>   * @size: the size to reallocate
> + * @align: requested alignment
>   * @flags: the flags for the page level allocator
>   *
>   * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
> @@ -4103,7 +4105,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
>   * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
>   *         failure
>   */
> -void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
> +void *vrealloc_align_noprof(const void *p, size_t size, size_t align,
> +			    gfp_t flags)
>  {
>  	struct vm_struct *vm = NULL;
>  	size_t alloced_size = 0;
> @@ -4116,49 +4119,65 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
>  	}
>  
>  	if (p) {
> +		if (!is_power_of_2(align)) {
> +			WARN(1, "Trying to vrealloc_align() align is not power of 2 (%ld)\n",
> +			     align);
> +			return NULL;
> +		}
> +
>  		vm = find_vm_area(p);
>  		if (unlikely(!vm)) {
> -			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
> +			WARN(1, "Trying to vrealloc_align() nonexistent vm area (%p)\n", p);
>  			return NULL;
>  		}
>  
>  		alloced_size = get_vm_area_size(vm);
>  		old_size = vm->requested_size;
>  		if (WARN(alloced_size < old_size,
> -			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
> +			 "vrealloc_align() has mismatched area vs requested sizes (%p)\n", p))
>  			return NULL;
>  	}
>  
> -	/*
> -	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
> -	 * would be a good heuristic for when to shrink the vm_area?
> -	 */
> -	if (size <= old_size) {
> -		/* Zero out "freed" memory, potentially for future realloc. */
> -		if (want_init_on_free() || want_init_on_alloc(flags))
> -			memset((void *)p + size, 0, old_size - size);
> -		vm->requested_size = size;
> -		kasan_poison_vmalloc(p + size, old_size - size);
> -		return (void *)p;
> -	}
> +	if (IS_ALIGNED((unsigned long)p, align)) {
> +		/*
> +		 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
> +		 * would be a good heuristic for when to shrink the vm_area?
> +		 */
> +		if (size <= old_size) {
> +			/* Zero out "freed" memory, potentially for future realloc. */
> +			if (want_init_on_free() || want_init_on_alloc(flags))
> +				memset((void *)p + size, 0, old_size - size);
> +			vm->requested_size = size;
> +			kasan_poison_vmalloc(p + size, old_size - size);
> +			return (void *)p;
> +		}
>  
> -	/*
> -	 * We already have the bytes available in the allocation; use them.
> -	 */
> -	if (size <= alloced_size) {
> -		kasan_unpoison_vmalloc(p + old_size, size - old_size,
> -				       KASAN_VMALLOC_PROT_NORMAL);
>  		/*
> -		 * No need to zero memory here, as unused memory will have
> -		 * already been zeroed at initial allocation time or during
> -		 * realloc shrink time.
> +		 * We already have the bytes available in the allocation; use them.
> +		 */
> +		if (size <= alloced_size) {
> +			kasan_unpoison_vmalloc(p + old_size, size - old_size,
> +					KASAN_VMALLOC_PROT_NORMAL);
> +			/*
> +			 * No need to zero memory here, as unused memory will have
> +			 * already been zeroed at initial allocation time or during
> +			 * realloc shrink time.
> +			 */
> +			vm->requested_size = size;
> +			return (void *)p;
> +		}
> +	} else {
> +		/*
> +		 * p is not aligned with align.
> +		 * Allocate a new address to handle it.
>  		 */
> -		vm->requested_size = size;
> -		return (void *)p;
> +		if (size < old_size)
> +			old_size = size;
>  	}
>  
>  	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
> -	n = __vmalloc_noprof(size, flags);
> +	n = __vmalloc_node_noprof(size, align, flags, NUMA_NO_NODE,
> +				  __builtin_return_address(0));
>  	if (!n)
>  		return NULL;
>  
> @@ -4170,6 +4189,11 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
>  	return n;
>  }
>  
> +void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
> +{
> +	return vrealloc_align_noprof(p, size, 1, flags);
> +}
> +
>  #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
>  #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
>  #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
> -- 
> 2.43.0
> 
This is similar to what Vitaly is doing. There is already a v14,
but as an example see it here: https://lkml.org/lkml/2025/7/9/1583

--
Uladzislau Rezki



* Re: [PATCH 3/3] rust: add a sample allocator usage
  2025-07-15 10:37   ` Danilo Krummrich
@ 2025-07-17 10:02     ` Your Name
  0 siblings, 0 replies; 9+ messages in thread
From: Your Name @ 2025-07-17 10:02 UTC (permalink / raw)
  To: Danilo Krummrich
  Cc: Andrew Morton, Uladzislau Rezki, Miguel Ojeda, Alex Gaynor,
	Boqun Feng, Gary Guo, bjorn3_gh, Benno Lossin, Andreas Hindborg,
	Alice Ryhl, Trevor Gross, Geliang Tang, Hui Zhu, linux-kernel,
	linux-mm, rust-for-linux

Hi Danilo,

Thanks for your help.

On Tue, Jul 15, 2025 at 12:37:52PM +0200, Danilo Krummrich wrote:
> On Tue Jul 15, 2025 at 11:59 AM CEST, Hui Zhu wrote:
> > +impl kernel::Module for RustAllocator {
> > +    fn init(_module: &'static ThisModule) -> Result<Self> {
> > +        pr_info!("Rust allocator sample (init)\n");
> > +
> > +        let mut vmalloc_vec = KVec::new();
> > +        for (size, align) in VMALLOC_ARG {
> > +            let (ptr, layout) = vmalloc_align(size, align)?;
> 
> Ok, I think I get the idea, you want to demonstrate how to use the Allocator
> trait for raw memory allocations.
> 
> However, doing so is discouraged unless there's really no other way. One obvious
> example are Rust's own memory allocation primitives, such as Box and Vec.
> 
> So, instead of this raw allocation, you can just use VBox::new() or
> VBox::new_uninit() in the following way.
> 
> 	[repr(align(ALIGN))]
> 	struct Blob([u8; SIZE]);
> 
> 	// Creates a vmalloc allocation of size `SIZE` with an alignment of
> 	// `ALIGN`. The allocation is freed once `b` is dropped.
> 	let b = VBox::<Blob>::new_uninit(GFP_KERNEL)?;
> 
> This way you don't have to handle the layout and the Allocator type yourself and
> you also don't have to care about explicitly calling vfree(), VBox does all this
> for you.
> 
> > +
> > +            let (addr, is_ok) = check_ptr(ptr, size, align);
> > +            if !is_ok {
> > +                clear_vmalloc_vec(&vmalloc_vec);
> > +                return Err(EINVAL);
> > +            }
> > +
> > +            vmalloc_vec.push((addr, layout), GFP_KERNEL)?;
> > +        }
> > +
> > +        Ok(RustAllocator { vmalloc_vec })
> > +    }
> > +}
>

I have sent v2. It only includes the sample code, updated to use VBox
according to your comments.

Best,
Hui


