From: "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@kernel.org>,
"Liam R . Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@kernel.org>, Jann Horn <jannh@google.com>,
Pedro Falcato <pfalcato@suse.de>, Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Kees Cook <kees@kernel.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Vineet Gupta <vgupta@kernel.org>,
Russell King <linux@armlinux.org.uk>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Brian Cain <bcain@kernel.org>,
Huacai Chen <chenhuacai@kernel.org>,
WANG Xuerui <kernel@xen0n.name>,
Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
Dinh Nguyen <dinguyen@kernel.org>,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
Christophe Leroy <chleroy@kernel.org>,
Paul Walmsley <pjw@kernel.org>,
Palmer Dabbelt <palmer@dabbelt.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Alexandre Ghiti <alex@ghiti.fr>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H . Peter Anvin" <hpa@zytor.com>,
Richard Weinberger <richard@nod.at>,
Anton Ivanov <anton.ivanov@cambridgegreys.com>,
Johannes Berg <johannes@sipsolutions.net>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>, Jan Kara <jack@suse.cz>,
Xu Xin <xu.xin16@zte.com.cn>,
Chengming Zhou <chengming.zhou@linux.dev>,
Michal Hocko <mhocko@suse.com>, Paul Moore <paul@paul-moore.com>,
Stephen Smalley <stephen.smalley.work@gmail.com>,
Ondrej Mosnacek <omosnace@redhat.com>,
linux-snps-arc@lists.infradead.org,
linux-arm-kernel@lists.infradead.org,
linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
linux-um@lists.infradead.org, linux-fsdevel@vger.kernel.org,
selinux@vger.kernel.org
Subject: [PATCH v4 21/25] mm/vma: convert as much as we can in mm/vma.c to vma_flags_t
Date: Fri, 20 Mar 2026 19:38:38 +0000 [thread overview]
Message-ID: <5fdeaf8af9a12c2a5d68497495f52fa627d05a5b.1774034900.git.ljs@kernel.org> (raw)
In-Reply-To: <cover.1774034900.git.ljs@kernel.org>
Now we have established a good foundation for vm_flags_t to vma_flags_t
changes, update mm/vma.c to utilise vma_flags_t wherever possible.
We are able to convert VM_STARTGAP_FLAGS entirely, as it is only used in
mm/vma.c. To account for the fact that we can't use VM_NONE to make life
easier, we place the definition within the existing #ifdefs, which is
cleaner.
Generally the remaining changes are mechanical.
Also update the VMA tests to reflect the changes.
Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
---
include/linux/mm.h | 6 ++-
mm/vma.c | 89 +++++++++++++++++--------------
tools/testing/vma/include/dup.h | 4 ++
tools/testing/vma/include/stubs.h | 2 +-
4 files changed, 59 insertions(+), 42 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d3585999aa0b..86a236443364 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -463,8 +463,10 @@ enum {
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \
defined(CONFIG_RISCV_USER_CFI)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK VM_NONE
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO INIT_VM_FLAG(SAO)
@@ -539,8 +541,6 @@ enum {
/* Temporary until VMA flags conversion complete. */
#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
-#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
-
#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
#define VM_SEALED_SYSMAP VM_SEALED
#else
@@ -584,6 +584,8 @@ enum {
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
+#define VMA_LOCKED_MASK mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)
+
/* These flags can be updated atomically via VMA/mmap read lock. */
#define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
diff --git a/mm/vma.c b/mm/vma.c
index 9362860389ae..9d194f8e7acb 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -185,7 +185,7 @@ static void init_multi_vma_prep(struct vma_prepare *vp,
}
/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff)
* in front of (at a lower virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
@@ -211,7 +211,7 @@ static bool can_vma_merge_before(struct vma_merge_struct *vmg)
}
/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff)
* beyond (at a higher virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
@@ -850,7 +850,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
* furthermost left or right side of the VMA, then we have no chance of
* merging and should abort.
*/
- if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side))
+ if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) ||
+ (!left_side && !right_side))
return NULL;
if (left_side)
@@ -1072,7 +1073,8 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
vmg->state = VMA_MERGE_NOMERGE;
/* Special VMAs are unmergeable, also if no prev/next. */
- if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next))
+ if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) ||
+ (!prev && !next))
return NULL;
can_merge_left = can_vma_merge_left(vmg);
@@ -1459,17 +1461,17 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
nrpages = vma_pages(next);
vms->nr_pages += nrpages;
- if (next->vm_flags & VM_LOCKED)
+ if (vma_test(next, VMA_LOCKED_BIT))
vms->locked_vm += nrpages;
- if (next->vm_flags & VM_ACCOUNT)
+ if (vma_test(next, VMA_ACCOUNT_BIT))
vms->nr_accounted += nrpages;
if (is_exec_mapping(next->vm_flags))
vms->exec_vm += nrpages;
else if (is_stack_mapping(next->vm_flags))
vms->stack_vm += nrpages;
- else if (is_data_mapping(next->vm_flags))
+ else if (is_data_mapping_vma_flags(&next->flags))
vms->data_vm += nrpages;
if (vms->uf) {
@@ -2065,14 +2067,13 @@ static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
- return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
- (VM_WRITE | VM_SHARED);
+ return vma_test_all(vma, VMA_WRITE_BIT, VMA_SHARED_BIT);
}
static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
/* No managed pages to writeback. */
- if (vma->vm_flags & VM_PFNMAP)
+ if (vma_test(vma, VMA_PFNMAP_BIT))
return false;
return vma->vm_file && vma->vm_file->f_mapping &&
@@ -2338,8 +2339,11 @@ void mm_drop_all_locks(struct mm_struct *mm)
* We account for memory if it's a private writeable mapping,
* not hugepages and VM_NORESERVE wasn't set.
*/
-static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
+static bool accountable_mapping(struct mmap_state *map)
{
+ const struct file *file = map->file;
+ vma_flags_t mask;
+
/*
* hugetlb has its own accounting separate from the core VM
* VM_HUGETLB may not be set yet so we cannot check for that flag.
@@ -2347,7 +2351,9 @@ static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
if (file && is_file_hugepages(file))
return false;
- return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+ mask = vma_flags_and(&map->vma_flags, VMA_NORESERVE_BIT, VMA_SHARED_BIT,
+ VMA_WRITE_BIT);
+ return vma_flags_same(&mask, VMA_WRITE_BIT);
}
/*
@@ -2450,7 +2456,7 @@ static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
return -ENOMEM;
/* Private writable mapping: check memory availability. */
- if (accountable_mapping(map->file, map->vm_flags)) {
+ if (accountable_mapping(map)) {
map->charged = map->pglen;
map->charged -= vms->nr_accounted;
if (map->charged) {
@@ -2460,7 +2466,7 @@ static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
}
vms->nr_accounted = 0;
- map->vm_flags |= VM_ACCOUNT;
+ vma_flags_set(&map->vma_flags, VMA_ACCOUNT_BIT);
}
/*
@@ -2508,12 +2514,12 @@ static int __mmap_new_file_vma(struct mmap_state *map,
* Drivers should not permit writability when previously it was
* disallowed.
*/
- VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags &&
- !(map->vm_flags & VM_MAYWRITE) &&
- (vma->vm_flags & VM_MAYWRITE));
+ VM_WARN_ON_ONCE(!vma_flags_same_pair(&map->vma_flags, &vma->flags) &&
+ !vma_flags_test(&map->vma_flags, VMA_MAYWRITE_BIT) &&
+ vma_test(vma, VMA_MAYWRITE_BIT));
map->file = vma->vm_file;
- map->vm_flags = vma->vm_flags;
+ map->vma_flags = vma->flags;
return 0;
}
@@ -2544,7 +2550,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
vma_iter_config(vmi, map->addr, map->end);
vma_set_range(vma, map->addr, map->end, map->pgoff);
- vm_flags_init(vma, map->vm_flags);
+ vma->flags = map->vma_flags;
vma->vm_page_prot = map->page_prot;
if (vma_iter_prealloc(vmi, vma)) {
@@ -2554,7 +2560,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
if (map->file)
error = __mmap_new_file_vma(map, vma);
- else if (map->vm_flags & VM_SHARED)
+ else if (vma_flags_test(&map->vma_flags, VMA_SHARED_BIT))
error = shmem_zero_setup(vma);
else
vma_set_anonymous(vma);
@@ -2564,7 +2570,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
if (!map->check_ksm_early) {
update_ksm_flags(map);
- vm_flags_init(vma, map->vm_flags);
+ vma->flags = map->vma_flags;
}
#ifdef CONFIG_SPARC64
@@ -2604,7 +2610,6 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
{
struct mm_struct *mm = map->mm;
- vm_flags_t vm_flags = vma->vm_flags;
perf_event_mmap(vma);
@@ -2612,9 +2617,9 @@ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
vm_stat_account(mm, vma->vm_flags, map->pglen);
- if (vm_flags & VM_LOCKED) {
+ if (vma_test(vma, VMA_LOCKED_BIT)) {
if (!vma_supports_mlock(vma))
- vm_flags_clear(vma, VM_LOCKED_MASK);
+ vma_clear_flags_mask(vma, VMA_LOCKED_MASK);
else
mm->locked_vm += map->pglen;
}
@@ -2630,7 +2635,7 @@ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
* a completely new data area).
*/
if (pgtable_supports_soft_dirty())
- vm_flags_set(vma, VM_SOFTDIRTY);
+ vma_set_flags(vma, VMA_SOFTDIRTY_BIT);
vma_set_page_prot(vma);
}
@@ -2993,7 +2998,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
gap = vma_iter_addr(&vmi) + info->start_gap;
gap += (info->align_offset - gap) & info->align_mask;
tmp = vma_next(&vmi);
- if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
+ /* Avoid prev check if possible */
+ if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
if (vm_start_gap(tmp) < gap + length - 1) {
low_limit = tmp->vm_end;
vma_iter_reset(&vmi);
@@ -3045,7 +3051,8 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
gap -= (gap - info->align_offset) & info->align_mask;
gap_end = vma_iter_end(&vmi);
tmp = vma_next(&vmi);
- if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
+ /* Avoid prev check if possible */
+ if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
if (vm_start_gap(tmp) < gap_end) {
high_limit = vm_start_gap(tmp);
vma_iter_reset(&vmi);
@@ -3083,12 +3090,16 @@ static int acct_stack_growth(struct vm_area_struct *vma,
return -ENOMEM;
/* mlock limit tests */
- if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, grow << PAGE_SHIFT))
+ if (!mlock_future_ok(mm, vma_test(vma, VMA_LOCKED_BIT),
+ grow << PAGE_SHIFT))
return -ENOMEM;
/* Check to ensure the stack will not grow into a hugetlb-only region */
- new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
- vma->vm_end - size;
+ new_start = vma->vm_end - size;
+#ifdef CONFIG_STACK_GROWSUP
+ if (vma_test(vma, VMA_GROWSUP_BIT))
+ new_start = vma->vm_start;
+#endif
if (is_hugepage_only_range(vma->vm_mm, new_start, size))
return -EFAULT;
@@ -3102,7 +3113,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
return 0;
}
-#if defined(CONFIG_STACK_GROWSUP)
+#ifdef CONFIG_STACK_GROWSUP
/*
* PA-RISC uses this for its stack.
* vma is the last one with address > vma->vm_end. Have to extend vma.
@@ -3115,7 +3126,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
int error = 0;
VMA_ITERATOR(vmi, mm, vma->vm_start);
- if (!(vma->vm_flags & VM_GROWSUP))
+ if (!vma_test(vma, VMA_GROWSUP_BIT))
return -EFAULT;
mmap_assert_write_locked(mm);
@@ -3135,7 +3146,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
next = find_vma_intersection(mm, vma->vm_end, gap_addr);
if (next && vma_is_accessible(next)) {
- if (!(next->vm_flags & VM_GROWSUP))
+ if (!vma_test(next, VMA_GROWSUP_BIT))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
}
@@ -3169,7 +3180,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
- if (vma->vm_flags & VM_LOCKED)
+ if (vma_test(vma, VMA_LOCKED_BIT))
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
@@ -3200,7 +3211,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
int error = 0;
VMA_ITERATOR(vmi, mm, vma->vm_start);
- if (!(vma->vm_flags & VM_GROWSDOWN))
+ if (!vma_test(vma, VMA_GROWSDOWN_BIT))
return -EFAULT;
mmap_assert_write_locked(mm);
@@ -3213,7 +3224,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
prev = vma_prev(&vmi);
/* Check that both stack segments have the same anon_vma? */
if (prev) {
- if (!(prev->vm_flags & VM_GROWSDOWN) &&
+ if (!vma_test(prev, VMA_GROWSDOWN_BIT) &&
vma_is_accessible(prev) &&
(address - prev->vm_end < stack_guard_gap))
return -ENOMEM;
@@ -3248,7 +3259,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
if (grow <= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
- if (vma->vm_flags & VM_LOCKED)
+ if (vma_test(vma, VMA_LOCKED_BIT))
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
@@ -3297,7 +3308,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
return -ENOMEM;
- if ((vma->vm_flags & VM_ACCOUNT) &&
+ if (vma_test(vma, VMA_ACCOUNT_BIT) &&
security_vm_enough_memory_mm(mm, charged))
return -ENOMEM;
@@ -3319,7 +3330,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
}
if (vma_link(mm, vma)) {
- if (vma->vm_flags & VM_ACCOUNT)
+ if (vma_test(vma, VMA_ACCOUNT_BIT))
vm_unacct_memory(charged);
return -ENOMEM;
}
diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h
index 93ea600d0895..58a621ec389f 100644
--- a/tools/testing/vma/include/dup.h
+++ b/tools/testing/vma/include/dup.h
@@ -267,8 +267,10 @@ enum {
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK VM_NONE
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO INIT_VM_FLAG(SAO)
@@ -366,6 +368,8 @@ enum {
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
+#define VMA_LOCKED_MASK mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)
+
#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h
index b5dced3b0bd4..5afb0afe2d48 100644
--- a/tools/testing/vma/include/stubs.h
+++ b/tools/testing/vma/include/stubs.h
@@ -229,7 +229,7 @@ static inline bool signal_pending(void *p)
return false;
}
-static inline bool is_file_hugepages(struct file *file)
+static inline bool is_file_hugepages(const struct file *file)
{
return false;
}
--
2.53.0
next prev parent reply other threads:[~2026-03-20 19:39 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-20 19:38 [PATCH v4 00/25] mm/vma: convert vm_flags_t to vma_flags_t in vma code Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 01/25] mm/vma: add vma_flags_empty(), vma_flags_and(), vma_flags_diff_pair() Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 02/25] tools/testing/vma: add unit tests flag empty, diff_pair, and[_mask] Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 03/25] mm/vma: add further vma_flags_t unions Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 04/25] tools/testing/vma: convert bulk of test code to vma_flags_t Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 05/25] mm/vma: use new VMA flags for sticky flags logic Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 06/25] tools/testing/vma: fix VMA flag tests Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 07/25] mm/vma: add append_vma_flags() helper Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 08/25] tools/testing/vma: add simple test for append_vma_flags() Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 09/25] mm: unexport vm_brk_flags() and eliminate vm_flags parameter Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 10/25] mm/vma: introduce vma_flags_same[_mask/_pair]() Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 11/25] mm/vma: introduce [vma_flags,legacy]_to_[legacy,vma_flags]() helpers Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 12/25] tools/testing/vma: test that legacy flag helpers work correctly Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 13/25] mm/vma: introduce vma_test[_any[_mask]](), and make inlining consistent Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 14/25] tools/testing/vma: update VMA flag tests to test vma_test[_any_mask]() Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 15/25] mm: introduce vma_flags_count() and vma[_flags]_test_single_mask() Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 16/25] tools/testing/vma: test vma_flags_count,vma[_flags]_test_single_mask Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 17/25] mm: convert do_brk_flags() to use vma_flags_t Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 18/25] mm: update vma_supports_mlock() to use new VMA flags Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 19/25] mm/vma: introduce vma_clear_flags[_mask]() Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 20/25] tools/testing/vma: update VMA tests to test vma_clear_flags[_mask]() Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` Lorenzo Stoakes (Oracle) [this message]
2026-03-20 19:38 ` [PATCH v4 22/25] tools: bitmap: add missing bitmap_copy() implementation Lorenzo Stoakes (Oracle)
2026-03-20 19:38 ` [PATCH v4 23/25] mm/vma: convert vma_modify_flags[_uffd]() to use vma_flags_t Lorenzo Stoakes (Oracle)
2026-03-23 15:47 ` Vlastimil Babka (SUSE)
2026-03-20 19:38 ` [PATCH v4 24/25] mm/vma: convert __mmap_region() " Lorenzo Stoakes (Oracle)
2026-03-23 15:49 ` Vlastimil Babka (SUSE)
2026-03-20 19:38 ` [PATCH v4 25/25] mm: simplify VMA flag tests of excluded flags Lorenzo Stoakes (Oracle)
2026-03-23 15:51 ` Vlastimil Babka (SUSE)
2026-03-20 19:56 ` [PATCH v4 00/25] mm/vma: convert vm_flags_t to vma_flags_t in vma code Andrew Morton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5fdeaf8af9a12c2a5d68497495f52fa627d05a5b.1774034900.git.ljs@kernel.org \
--to=ljs@kernel.org \
--cc=Liam.Howlett@oracle.com \
--cc=agordeev@linux.ibm.com \
--cc=akpm@linux-foundation.org \
--cc=alex@ghiti.fr \
--cc=anton.ivanov@cambridgegreys.com \
--cc=aou@eecs.berkeley.edu \
--cc=bcain@kernel.org \
--cc=borntraeger@linux.ibm.com \
--cc=bp@alien8.de \
--cc=brauner@kernel.org \
--cc=catalin.marinas@arm.com \
--cc=chengming.zhou@linux.dev \
--cc=chenhuacai@kernel.org \
--cc=chleroy@kernel.org \
--cc=dave.hansen@linux.intel.com \
--cc=david@kernel.org \
--cc=dinguyen@kernel.org \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=hpa@zytor.com \
--cc=jack@suse.cz \
--cc=jannh@google.com \
--cc=johannes@sipsolutions.net \
--cc=kees@kernel.org \
--cc=kernel@xen0n.name \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-hexagon@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mips@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-riscv@lists.infradead.org \
--cc=linux-s390@vger.kernel.org \
--cc=linux-snps-arc@lists.infradead.org \
--cc=linux-um@lists.infradead.org \
--cc=linux@armlinux.org.uk \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=loongarch@lists.linux.dev \
--cc=maddy@linux.ibm.com \
--cc=mhocko@suse.com \
--cc=mingo@redhat.com \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=omosnace@redhat.com \
--cc=palmer@dabbelt.com \
--cc=paul@paul-moore.com \
--cc=pfalcato@suse.de \
--cc=pjw@kernel.org \
--cc=richard@nod.at \
--cc=rppt@kernel.org \
--cc=selinux@vger.kernel.org \
--cc=stephen.smalley.work@gmail.com \
--cc=surenb@google.com \
--cc=svens@linux.ibm.com \
--cc=tglx@kernel.org \
--cc=tsbogend@alpha.franken.de \
--cc=vbabka@kernel.org \
--cc=vgupta@kernel.org \
--cc=viro@zeniv.linux.org.uk \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=xu.xin16@zte.com.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox