* [to-be-updated] userfaultfd-introduce-vm_uffd_ops.patch removed from -mm tree
@ 2026-04-02 4:23 Andrew Morton
0 siblings, 0 replies; 2+ messages in thread
From: Andrew Morton @ 2026-04-02 4:23 UTC (permalink / raw)
To: mm-commits, rppt, akpm
The quilt patch titled
Subject: userfaultfd: introduce vm_uffd_ops
has been removed from the -mm tree. Its filename was
userfaultfd-introduce-vm_uffd_ops.patch
This patch was dropped because an updated version will be issued
------------------------------------------------------
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
Subject: userfaultfd: introduce vm_uffd_ops
Date: Mon, 30 Mar 2026 13:11:08 +0300
Current userfaultfd implementation works only with memory managed by core
MM: anonymous, shmem and hugetlb.
First, there is no fundamental reason to limit userfaultfd support only to
the core memory types and userfaults can be handled similarly to regular
page faults provided a VMA owner implements appropriate callbacks.
Second, historically various code paths were conditioned on
vma_is_anonymous(), vma_is_shmem() and is_vm_hugetlb_page() and some of
these conditions can be expressed as operations implemented by a
particular memory type.
Introduce vm_uffd_ops extension to vm_operations_struct that will delegate
memory type specific operations to a VMA owner.
Operations for anonymous memory are handled internally in userfaultfd
using anon_uffd_ops, which is implicitly assigned to anonymous VMAs.
Start with a single operation, ->can_userfault() that will verify that a
VMA meets requirements for userfaultfd support at registration time.
Implement that method for anonymous, shmem and hugetlb and move relevant
parts of vma_can_userfault() into the new callbacks.
Link: https://lkml.kernel.org/r/20260330101116.1117699-8-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrei Vagin <avagin@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/mm.h | 5 +++
include/linux/userfaultfd_k.h | 6 ++++
mm/hugetlb.c | 15 ++++++++++
mm/shmem.c | 15 ++++++++++
mm/userfaultfd.c | 44 ++++++++++++++++++++++----------
5 files changed, 72 insertions(+), 13 deletions(-)
--- a/include/linux/mm.h~userfaultfd-introduce-vm_uffd_ops
+++ a/include/linux/mm.h
@@ -758,6 +758,8 @@ struct vm_fault {
*/
};
+struct vm_uffd_ops;
+
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -865,6 +867,9 @@ struct vm_operations_struct {
struct page *(*find_normal_page)(struct vm_area_struct *vma,
unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
+#ifdef CONFIG_USERFAULTFD
+ const struct vm_uffd_ops *uffd_ops;
+#endif
};
#ifdef CONFIG_NUMA_BALANCING
--- a/include/linux/userfaultfd_k.h~userfaultfd-introduce-vm_uffd_ops
+++ a/include/linux/userfaultfd_k.h
@@ -83,6 +83,12 @@ struct userfaultfd_ctx {
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
+/* VMA userfaultfd operations */
+struct vm_uffd_ops {
+ /* Checks if a VMA can support userfaultfd */
+ bool (*can_userfault)(struct vm_area_struct *vma, vm_flags_t vm_flags);
+};
+
/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;
--- a/mm/hugetlb.c~userfaultfd-introduce-vm_uffd_ops
+++ a/mm/hugetlb.c
@@ -4792,6 +4792,18 @@ static vm_fault_t hugetlb_vm_op_fault(st
return 0;
}
+#ifdef CONFIG_USERFAULTFD
+static bool hugetlb_can_userfault(struct vm_area_struct *vma,
+ vm_flags_t vm_flags)
+{
+ return true;
+}
+
+static const struct vm_uffd_ops hugetlb_uffd_ops = {
+ .can_userfault = hugetlb_can_userfault,
+};
+#endif
+
/*
* When a new function is introduced to vm_operations_struct and added
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
@@ -4805,6 +4817,9 @@ const struct vm_operations_struct hugetl
.close = hugetlb_vm_op_close,
.may_split = hugetlb_vm_op_split,
.pagesize = hugetlb_vm_op_pagesize,
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &hugetlb_uffd_ops,
+#endif
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
--- a/mm/shmem.c~userfaultfd-introduce-vm_uffd_ops
+++ a/mm/shmem.c
@@ -3288,6 +3288,15 @@ out_unacct_blocks:
shmem_inode_unacct_blocks(inode, 1);
return ret;
}
+
+static bool shmem_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+ return true;
+}
+
+static const struct vm_uffd_ops shmem_uffd_ops = {
+ .can_userfault = shmem_can_userfault,
+};
#endif /* CONFIG_USERFAULTFD */
#ifdef CONFIG_TMPFS
@@ -5307,6 +5316,9 @@ static const struct vm_operations_struct
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &shmem_uffd_ops,
+#endif
};
static const struct vm_operations_struct shmem_anon_vm_ops = {
@@ -5316,6 +5328,9 @@ static const struct vm_operations_struct
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &shmem_uffd_ops,
+#endif
};
int shmem_init_fs_context(struct fs_context *fc)
--- a/mm/userfaultfd.c~userfaultfd-introduce-vm_uffd_ops
+++ a/mm/userfaultfd.c
@@ -34,6 +34,25 @@ struct mfill_state {
pmd_t *pmd;
};
+static bool anon_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+ /* anonymous memory does not support MINOR mode */
+ if (vm_flags & VM_UFFD_MINOR)
+ return false;
+ return true;
+}
+
+static const struct vm_uffd_ops anon_uffd_ops = {
+ .can_userfault = anon_can_userfault,
+};
+
+static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
+{
+ if (vma_is_anonymous(vma))
+ return &anon_uffd_ops;
+ return vma->vm_ops ? vma->vm_ops->uffd_ops : NULL;
+}
+
static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{
@@ -2021,34 +2040,33 @@ out:
bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
bool wp_async)
{
- vm_flags &= __VM_UFFD_FLAGS;
+ const struct vm_uffd_ops *ops = vma_uffd_ops(vma);
- if (vma->vm_flags & VM_DROPPABLE)
- return false;
-
- if ((vm_flags & VM_UFFD_MINOR) &&
- (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
- return false;
+ vm_flags &= __VM_UFFD_FLAGS;
/*
- * If wp async enabled, and WP is the only mode enabled, allow any
+ * If WP is the only mode enabled and context is wp async, allow any
* memory type.
*/
if (wp_async && (vm_flags == VM_UFFD_WP))
return true;
+ /* For any other mode reject VMAs that don't implement vm_uffd_ops */
+ if (!ops)
+ return false;
+
+ if (vma->vm_flags & VM_DROPPABLE)
+ return false;
+
/*
* If user requested uffd-wp but not enabled pte markers for
- * uffd-wp, then shmem & hugetlbfs are not supported but only
- * anonymous.
+ * uffd-wp, then only anonymous memory is supported
*/
if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) &&
!vma_is_anonymous(vma))
return false;
- /* By default, allow any of anon|shmem|hugetlb */
- return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
- vma_is_shmem(vma);
+ return ops->can_userfault(vma, vm_flags);
}
static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
_
Patches currently in -mm which might be from rppt@kernel.org are
shmem-userfaultfd-use-a-vma-callback-to-handle-uffdio_continue.patch
userfaultfd-introduce-vm_uffd_ops-alloc_folio.patch
shmem-userfaultfd-implement-shmem-uffd-operations-using-vm_uffd_ops.patch
userfaultfd-mfill_atomic-remove-retry-logic.patch
^ permalink raw reply [flat|nested] 2+ messages in thread* [to-be-updated] userfaultfd-introduce-vm_uffd_ops.patch removed from -mm tree
@ 2026-03-30 19:34 Andrew Morton
0 siblings, 0 replies; 2+ messages in thread
From: Andrew Morton @ 2026-03-30 19:34 UTC (permalink / raw)
To: mm-commits, rppt, akpm
The quilt patch titled
Subject: userfaultfd: introduce vm_uffd_ops
has been removed from the -mm tree. Its filename was
userfaultfd-introduce-vm_uffd_ops.patch
This patch was dropped because an updated version will be issued
------------------------------------------------------
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
Subject: userfaultfd: introduce vm_uffd_ops
Date: Fri, 6 Mar 2026 19:18:07 +0200
Current userfaultfd implementation works only with memory managed by core
MM: anonymous, shmem and hugetlb.
First, there is no fundamental reason to limit userfaultfd support only to
the core memory types and userfaults can be handled similarly to regular
page faults provided a VMA owner implements appropriate callbacks.
Second, historically various code paths were conditioned on
vma_is_anonymous(), vma_is_shmem() and is_vm_hugetlb_page() and some of
these conditions can be expressed as operations implemented by a
particular memory type.
Introduce vm_uffd_ops extension to vm_operations_struct that will delegate
memory type specific operations to a VMA owner.
Operations for anonymous memory are handled internally in userfaultfd
using anon_uffd_ops, which is implicitly assigned to anonymous VMAs.
Start with a single operation, ->can_userfault() that will verify that a
VMA meets requirements for userfaultfd support at registration time.
Implement that method for anonymous, shmem and hugetlb and move relevant
parts of vma_can_userfault() into the new callbacks.
[rppt@kernel.org: allow registration of WP_ASYNC for any VMA]
Link: https://lkml.kernel.org/r/abG5HFV8yoEHOFkh@kernel.org
Link: https://lkml.kernel.org/r/20260306171815.3160826-8-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/mm.h | 5 +++
include/linux/userfaultfd_k.h | 6 ++++
mm/hugetlb.c | 15 ++++++++++
mm/shmem.c | 15 ++++++++++
mm/userfaultfd.c | 44 ++++++++++++++++++++++----------
5 files changed, 72 insertions(+), 13 deletions(-)
--- a/include/linux/mm.h~userfaultfd-introduce-vm_uffd_ops
+++ a/include/linux/mm.h
@@ -758,6 +758,8 @@ struct vm_fault {
*/
};
+struct vm_uffd_ops;
+
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -865,6 +867,9 @@ struct vm_operations_struct {
struct page *(*find_normal_page)(struct vm_area_struct *vma,
unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
+#ifdef CONFIG_USERFAULTFD
+ const struct vm_uffd_ops *uffd_ops;
+#endif
};
#ifdef CONFIG_NUMA_BALANCING
--- a/include/linux/userfaultfd_k.h~userfaultfd-introduce-vm_uffd_ops
+++ a/include/linux/userfaultfd_k.h
@@ -83,6 +83,12 @@ struct userfaultfd_ctx {
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
+/* VMA userfaultfd operations */
+struct vm_uffd_ops {
+ /* Checks if a VMA can support userfaultfd */
+ bool (*can_userfault)(struct vm_area_struct *vma, vm_flags_t vm_flags);
+};
+
/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;
--- a/mm/hugetlb.c~userfaultfd-introduce-vm_uffd_ops
+++ a/mm/hugetlb.c
@@ -4792,6 +4792,18 @@ static vm_fault_t hugetlb_vm_op_fault(st
return 0;
}
+#ifdef CONFIG_USERFAULTFD
+static bool hugetlb_can_userfault(struct vm_area_struct *vma,
+ vm_flags_t vm_flags)
+{
+ return true;
+}
+
+static const struct vm_uffd_ops hugetlb_uffd_ops = {
+ .can_userfault = hugetlb_can_userfault,
+};
+#endif
+
/*
* When a new function is introduced to vm_operations_struct and added
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
@@ -4805,6 +4817,9 @@ const struct vm_operations_struct hugetl
.close = hugetlb_vm_op_close,
.may_split = hugetlb_vm_op_split,
.pagesize = hugetlb_vm_op_pagesize,
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &hugetlb_uffd_ops,
+#endif
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
--- a/mm/shmem.c~userfaultfd-introduce-vm_uffd_ops
+++ a/mm/shmem.c
@@ -3288,6 +3288,15 @@ out_unacct_blocks:
shmem_inode_unacct_blocks(inode, 1);
return ret;
}
+
+static bool shmem_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+ return true;
+}
+
+static const struct vm_uffd_ops shmem_uffd_ops = {
+ .can_userfault = shmem_can_userfault,
+};
#endif /* CONFIG_USERFAULTFD */
#ifdef CONFIG_TMPFS
@@ -5307,6 +5316,9 @@ static const struct vm_operations_struct
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &shmem_uffd_ops,
+#endif
};
static const struct vm_operations_struct shmem_anon_vm_ops = {
@@ -5316,6 +5328,9 @@ static const struct vm_operations_struct
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &shmem_uffd_ops,
+#endif
};
int shmem_init_fs_context(struct fs_context *fc)
--- a/mm/userfaultfd.c~userfaultfd-introduce-vm_uffd_ops
+++ a/mm/userfaultfd.c
@@ -34,6 +34,25 @@ struct mfill_state {
pmd_t *pmd;
};
+static bool anon_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+ /* anonymous memory does not support MINOR mode */
+ if (vm_flags & VM_UFFD_MINOR)
+ return false;
+ return true;
+}
+
+static const struct vm_uffd_ops anon_uffd_ops = {
+ .can_userfault = anon_can_userfault,
+};
+
+static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
+{
+ if (vma_is_anonymous(vma))
+ return &anon_uffd_ops;
+ return vma->vm_ops ? vma->vm_ops->uffd_ops : NULL;
+}
+
static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{
@@ -2021,34 +2040,33 @@ out:
bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
bool wp_async)
{
- vm_flags &= __VM_UFFD_FLAGS;
+ const struct vm_uffd_ops *ops = vma_uffd_ops(vma);
- if (vma->vm_flags & VM_DROPPABLE)
- return false;
-
- if ((vm_flags & VM_UFFD_MINOR) &&
- (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
- return false;
+ vm_flags &= __VM_UFFD_FLAGS;
/*
- * If wp async enabled, and WP is the only mode enabled, allow any
+ * If WP is the only mode enabled and context is wp async, allow any
* memory type.
*/
if (wp_async && (vm_flags == VM_UFFD_WP))
return true;
+ /* For any other mode reject VMAs that don't implement vm_uffd_ops */
+ if (!ops)
+ return false;
+
+ if (vma->vm_flags & VM_DROPPABLE)
+ return false;
+
/*
* If user requested uffd-wp but not enabled pte markers for
- * uffd-wp, then shmem & hugetlbfs are not supported but only
- * anonymous.
+ * uffd-wp, then only anonymous memory is supported
*/
if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) &&
!vma_is_anonymous(vma))
return false;
- /* By default, allow any of anon|shmem|hugetlb */
- return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
- vma_is_shmem(vma);
+ return ops->can_userfault(vma, vm_flags);
}
static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
_
Patches currently in -mm which might be from rppt@kernel.org are
shmem-userfaultfd-use-a-vma-callback-to-handle-uffdio_continue.patch
userfaultfd-introduce-vm_uffd_ops-alloc_folio.patch
shmem-userfaultfd-implement-shmem-uffd-operations-using-vm_uffd_ops.patch
userfaultfd-mfill_atomic-remove-retry-logic.patch
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-04-02 4:23 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-02 4:23 [to-be-updated] userfaultfd-introduce-vm_uffd_ops.patch removed from -mm tree Andrew Morton
-- strict thread matches above, loose matches on Subject: below --
2026-03-30 19:34 Andrew Morton
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox