* [PATCH bpf v4 1/3] bpf: fix mm lifecycle in open-coded task_vma iterator
2026-03-16 18:57 [PATCH bpf v4 0/3] bpf: fix and improve open-coded task_vma iterator Puranjay Mohan
@ 2026-03-16 18:57 ` Puranjay Mohan
2026-03-24 19:38 ` Alexei Starovoitov
2026-03-16 18:57 ` [PATCH bpf v4 2/3] bpf: switch task_vma iterator from mmap_lock to per-VMA locks Puranjay Mohan
2026-03-16 18:57 ` [PATCH bpf v4 3/3] bpf: return VMA snapshot from task_vma iterator Puranjay Mohan
2 siblings, 1 reply; 6+ messages in thread
From: Puranjay Mohan @ 2026-03-16 18:57 UTC (permalink / raw)
To: bpf
Cc: Puranjay Mohan, Puranjay Mohan, Alexei Starovoitov,
Andrii Nakryiko, Daniel Borkmann, Martin KaFai Lau,
Eduard Zingerman, Kumar Kartikeya Dwivedi, Mykyta Yatsenko,
kernel-team
The open-coded task_vma iterator reads task->mm and acquires
mmap_read_trylock() but never calls mmget(). The mm can reach
mm_users == 0 if the task exits while the iterator holds the lock.
Add mmget_not_zero() before mmap_read_trylock(). Drop the mm reference
via mmput_async() in _destroy() and the error path.
Reject irqs-disabled contexts (including NMI) up front. Operations used
by _next() and _destroy() (mmap_read_unlock, mmput_async) take
spinlocks with IRQs disabled (pool->lock, pi_lock). Running from NMI
or from a tracepoint that fires with those locks held could deadlock.
Widen the mmput_async() #if guard to include CONFIG_BPF_SYSCALL,
following the same approach used for CONFIG_FUTEX_PRIVATE_HASH.
Fixes: 4ac454682158 ("bpf: Introduce task_vma open-coded iterator kfuncs")
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
---
include/linux/sched/mm.h | 2 +-
kernel/bpf/task_iter.c | 31 ++++++++++++++++++++++++++++---
kernel/fork.c | 2 +-
3 files changed, 30 insertions(+), 5 deletions(-)
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 95d0040df584..5908de0c2f82 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -140,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
-#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
+#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH) || defined(CONFIG_BPF_SYSCALL)
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 98d9b4c0daff..718f0f9b6396 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -9,6 +9,7 @@
#include <linux/bpf_mem_alloc.h>
#include <linux/btf_ids.h>
#include <linux/mm_types.h>
+#include <linux/sched/mm.h>
#include "mmap_unlock_work.h"
static const char * const iter_task_type_names[] = {
@@ -825,6 +826,18 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
BUILD_BUG_ON(sizeof(struct bpf_iter_task_vma_kern) != sizeof(struct bpf_iter_task_vma));
BUILD_BUG_ON(__alignof__(struct bpf_iter_task_vma_kern) != __alignof__(struct bpf_iter_task_vma));
+ /*
+ * Reject irqs-disabled contexts including NMI. Operations used
+ * by _next() and _destroy() (mmap_read_unlock, mmput_async)
+ * can take spinlocks with IRQs disabled (pi_lock, pool->lock).
+ * Running from NMI or from a tracepoint that fires with those
+ * locks held could deadlock.
+ */
+ if (irqs_disabled()) {
+ kit->data = NULL;
+ return -EBUSY;
+ }
+
/* is_iter_reg_valid_uninit guarantees that kit hasn't been initialized
* before, so non-NULL kit->data doesn't point to previously
* bpf_mem_alloc'd bpf_iter_task_vma_kern_data
@@ -842,17 +855,28 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
/* kit->data->work == NULL is valid after bpf_mmap_unlock_get_irq_work */
irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
- if (irq_work_busy || !mmap_read_trylock(kit->data->mm)) {
+ if (irq_work_busy) {
err = -EBUSY;
goto err_cleanup_iter;
}
+ if (!mmget_not_zero(kit->data->mm)) {
+ err = -ENOENT;
+ goto err_cleanup_iter;
+ }
+
+ if (!mmap_read_trylock(kit->data->mm)) {
+ err = -EBUSY;
+ goto err_cleanup_mmget;
+ }
+
vma_iter_init(&kit->data->vmi, kit->data->mm, addr);
return 0;
+err_cleanup_mmget:
+ mmput_async(kit->data->mm);
err_cleanup_iter:
- if (kit->data->task)
- put_task_struct(kit->data->task);
+ put_task_struct(kit->data->task);
bpf_mem_free(&bpf_global_ma, kit->data);
/* NULL kit->data signals failed bpf_iter_task_vma initialization */
kit->data = NULL;
@@ -875,6 +899,7 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
if (kit->data) {
bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
put_task_struct(kit->data->task);
+ mmput_async(kit->data->mm);
bpf_mem_free(&bpf_global_ma, kit->data);
}
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 65113a304518..d0411a63d4ab 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1198,7 +1198,7 @@ void mmput(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(mmput);
-#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
+#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH) || defined(CONFIG_BPF_SYSCALL)
static void mmput_async_fn(struct work_struct *work)
{
struct mm_struct *mm = container_of(work, struct mm_struct,
--
2.52.0
^ permalink raw reply related [flat|nested] 6+ messages in thread

* Re: [PATCH bpf v4 1/3] bpf: fix mm lifecycle in open-coded task_vma iterator
2026-03-16 18:57 ` [PATCH bpf v4 1/3] bpf: fix mm lifecycle in " Puranjay Mohan
@ 2026-03-24 19:38 ` Alexei Starovoitov
2026-03-24 20:47 ` Puranjay Mohan
0 siblings, 1 reply; 6+ messages in thread
From: Alexei Starovoitov @ 2026-03-24 19:38 UTC (permalink / raw)
To: Puranjay Mohan
Cc: bpf, Puranjay Mohan, Alexei Starovoitov, Andrii Nakryiko,
Daniel Borkmann, Martin KaFai Lau, Eduard Zingerman,
Kumar Kartikeya Dwivedi, Mykyta Yatsenko, Kernel Team
On Mon, Mar 16, 2026 at 11:57 AM Puranjay Mohan <puranjay@kernel.org> wrote:
>
> The open-coded task_vma iterator reads task->mm and acquires
> mmap_read_trylock() but never calls mmget(). The mm can reach
> mm_users == 0 if the task exits while the iterator holds the lock.
>
> Add mmget_not_zero() before mmap_read_trylock(). Drop the mm reference
> via mmput_async() in _destroy() and the error path.
>
> Reject irqs-disabled contexts (including NMI) up front. Operations used
> by _next() and _destroy() (mmap_read_unlock, mmput_async) take
> spinlocks with IRQs disabled (pool->lock, pi_lock). Running from NMI
> or from a tracepoint that fires with those locks held could deadlock.
>
> Widen the mmput_async() #if guard to include CONFIG_BPF_SYSCALL,
> following the same approach used for CONFIG_FUTEX_PRIVATE_HASH.
...
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index 95d0040df584..5908de0c2f82 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -140,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
>
> /* mmput gets rid of the mappings and all user-space */
> extern void mmput(struct mm_struct *);
> -#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
> +#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH) || defined(CONFIG_BPF_SYSCALL)
why? Is this some kind of odd config with bpf and without mmu?
Is it even possible?
These hunks make it impossible to land without broader review.
pw-bot: cr
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH bpf v4 1/3] bpf: fix mm lifecycle in open-coded task_vma iterator
2026-03-24 19:38 ` Alexei Starovoitov
@ 2026-03-24 20:47 ` Puranjay Mohan
0 siblings, 0 replies; 6+ messages in thread
From: Puranjay Mohan @ 2026-03-24 20:47 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: bpf, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann,
Martin KaFai Lau, Eduard Zingerman, Kumar Kartikeya Dwivedi,
Mykyta Yatsenko, Kernel Team
On Tue, Mar 24, 2026 at 7:38 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Mon, Mar 16, 2026 at 11:57 AM Puranjay Mohan <puranjay@kernel.org> wrote:
> >
> > The open-coded task_vma iterator reads task->mm and acquires
> > mmap_read_trylock() but never calls mmget(). The mm can reach
> > mm_users == 0 if the task exits while the iterator holds the lock.
> >
> > Add mmget_not_zero() before mmap_read_trylock(). Drop the mm reference
> > via mmput_async() in _destroy() and the error path.
> >
> > Reject irqs-disabled contexts (including NMI) up front. Operations used
> > by _next() and _destroy() (mmap_read_unlock, mmput_async) take
> > spinlocks with IRQs disabled (pool->lock, pi_lock). Running from NMI
> > or from a tracepoint that fires with those locks held could deadlock.
> >
> > Widen the mmput_async() #if guard to include CONFIG_BPF_SYSCALL,
> > following the same approach used for CONFIG_FUTEX_PRIVATE_HASH.
>
> ...
>
> > diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> > index 95d0040df584..5908de0c2f82 100644
> > --- a/include/linux/sched/mm.h
> > +++ b/include/linux/sched/mm.h
> > @@ -140,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
> >
> > /* mmput gets rid of the mappings and all user-space */
> > extern void mmput(struct mm_struct *);
> > -#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
> > +#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH) || defined(CONFIG_BPF_SYSCALL)
>
> why? Is this some kind of odd config with bpf and without mmu?
> Is it even possible?
>
> These hunks make it impossible to land without broader review.
It was found by the kernel test robot in:
https://lore.kernel.org/all/202603051628.H3HNsDUG-lkp@intel.com/
CONFIG_BPF_SYSCALL can exist without CONFIG_MMU, bpf_iter.o is
compiled only based on CONFIG_BPF_SYSCALL
When mmput_async() was originally introduced it was only used by
oom_reaper so they guarded it with CONFIG_MMU in
https://github.com/torvalds/linux/commit/7ef949d77f95f0d129f0d404b336459a34a00101
but now we need it in task_vma iterator that can exist without
CONFIG_MMU so I followed the same approach used for
CONFIG_FUTEX_PRIVATE_HASH as in
https://github.com/torvalds/linux/commit/56180dd20c19e5b0fa34822997a9ac66b517e7b3
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH bpf v4 2/3] bpf: switch task_vma iterator from mmap_lock to per-VMA locks
2026-03-16 18:57 [PATCH bpf v4 0/3] bpf: fix and improve open-coded task_vma iterator Puranjay Mohan
2026-03-16 18:57 ` [PATCH bpf v4 1/3] bpf: fix mm lifecycle in " Puranjay Mohan
@ 2026-03-16 18:57 ` Puranjay Mohan
2026-03-16 18:57 ` [PATCH bpf v4 3/3] bpf: return VMA snapshot from task_vma iterator Puranjay Mohan
2 siblings, 0 replies; 6+ messages in thread
From: Puranjay Mohan @ 2026-03-16 18:57 UTC (permalink / raw)
To: bpf
Cc: Puranjay Mohan, Puranjay Mohan, Alexei Starovoitov,
Andrii Nakryiko, Daniel Borkmann, Martin KaFai Lau,
Eduard Zingerman, Kumar Kartikeya Dwivedi, Mykyta Yatsenko,
kernel-team
The open-coded task_vma iterator holds mmap_lock for the entire duration
of iteration, increasing contention on this highly contended lock.
Switch to per-VMA locking. Find the next VMA via an RCU-protected maple
tree walk and lock it with lock_vma_under_rcu(). lock_next_vma() is not
used because its fallback takes mmap_read_lock(), and the iterator must
work in non-sleepable contexts.
lock_vma_under_rcu() is a point lookup (mas_walk) that finds the VMA
containing a given address but cannot iterate across gaps. An
RCU-protected vma_next() walk (mas_find) first locates the next VMA's
vm_start to pass to lock_vma_under_rcu().
Between the RCU walk and the lock, the VMA may be removed, shrunk, or
write-locked. On failure, advance past it using vm_end from the RCU
walk. Because the VMA slab is SLAB_TYPESAFE_BY_RCU, vm_end may be
stale; fall back to PAGE_SIZE advancement when it does not make forward
progress. Concurrent VMA insertions at addresses already passed by the
iterator are not detected.
CONFIG_PER_VMA_LOCK is required; return -EOPNOTSUPP without it.
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
---
kernel/bpf/task_iter.c | 98 +++++++++++++++++++++++++++++++++---------
1 file changed, 77 insertions(+), 21 deletions(-)
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 718f0f9b6396..ddaf1cf0ecae 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -9,6 +9,7 @@
#include <linux/bpf_mem_alloc.h>
#include <linux/btf_ids.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/sched/mm.h>
#include "mmap_unlock_work.h"
@@ -798,8 +799,8 @@ const struct bpf_func_proto bpf_find_vma_proto = {
struct bpf_iter_task_vma_kern_data {
struct task_struct *task;
struct mm_struct *mm;
- struct mmap_unlock_irq_work *work;
- struct vma_iterator vmi;
+ struct vm_area_struct *locked_vma;
+ u64 next_addr;
};
struct bpf_iter_task_vma {
@@ -820,15 +821,19 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
struct task_struct *task, u64 addr)
{
struct bpf_iter_task_vma_kern *kit = (void *)it;
- bool irq_work_busy = false;
int err;
BUILD_BUG_ON(sizeof(struct bpf_iter_task_vma_kern) != sizeof(struct bpf_iter_task_vma));
BUILD_BUG_ON(__alignof__(struct bpf_iter_task_vma_kern) != __alignof__(struct bpf_iter_task_vma));
+ if (!IS_ENABLED(CONFIG_PER_VMA_LOCK)) {
+ kit->data = NULL;
+ return -EOPNOTSUPP;
+ }
+
/*
* Reject irqs-disabled contexts including NMI. Operations used
- * by _next() and _destroy() (mmap_read_unlock, mmput_async)
+ * by _next() and _destroy() (vma_end_read, mmput_async)
* can take spinlocks with IRQs disabled (pi_lock, pool->lock).
* Running from NMI or from a tracepoint that fires with those
* locks held could deadlock.
@@ -853,28 +858,15 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
goto err_cleanup_iter;
}
- /* kit->data->work == NULL is valid after bpf_mmap_unlock_get_irq_work */
- irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
- if (irq_work_busy) {
- err = -EBUSY;
- goto err_cleanup_iter;
- }
-
if (!mmget_not_zero(kit->data->mm)) {
err = -ENOENT;
goto err_cleanup_iter;
}
- if (!mmap_read_trylock(kit->data->mm)) {
- err = -EBUSY;
- goto err_cleanup_mmget;
- }
-
- vma_iter_init(&kit->data->vmi, kit->data->mm, addr);
+ kit->data->locked_vma = NULL;
+ kit->data->next_addr = addr;
return 0;
-err_cleanup_mmget:
- mmput_async(kit->data->mm);
err_cleanup_iter:
put_task_struct(kit->data->task);
bpf_mem_free(&bpf_global_ma, kit->data);
@@ -883,13 +875,76 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
return err;
}
+/*
+ * Find and lock the next VMA at or after data->next_addr.
+ *
+ * lock_vma_under_rcu() is a point lookup (mas_walk): it finds the VMA
+ * containing a given address but cannot iterate. An RCU-protected
+ * maple tree walk with vma_next() (mas_find) is needed first to locate
+ * the next VMA's vm_start across any gap.
+ *
+ * Between the RCU walk and the lock, the VMA may be removed, shrunk,
+ * or write-locked. On failure, advance past it using vm_end from the
+ * RCU walk. SLAB_TYPESAFE_BY_RCU can make vm_end stale, so fall back
+ * to PAGE_SIZE advancement to guarantee forward progress.
+ */
+static struct vm_area_struct *
+bpf_iter_task_vma_find_next(struct bpf_iter_task_vma_kern_data *data)
+{
+ struct vm_area_struct *vma;
+ struct vma_iterator vmi;
+ unsigned long start, end;
+
+retry:
+ rcu_read_lock();
+ vma_iter_init(&vmi, data->mm, data->next_addr);
+ vma = vma_next(&vmi);
+ if (!vma) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ start = vma->vm_start;
+ end = vma->vm_end;
+ rcu_read_unlock();
+
+ vma = lock_vma_under_rcu(data->mm, start);
+ if (!vma) {
+ if (end > data->next_addr)
+ data->next_addr = end;
+ else
+ data->next_addr += PAGE_SIZE;
+ goto retry;
+ }
+
+ if (unlikely(data->next_addr >= vma->vm_end)) {
+ data->next_addr += PAGE_SIZE;
+ vma_end_read(vma);
+ goto retry;
+ }
+
+ return vma;
+}
+
__bpf_kfunc struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it)
{
struct bpf_iter_task_vma_kern *kit = (void *)it;
+ struct vm_area_struct *vma;
if (!kit->data) /* bpf_iter_task_vma_new failed */
return NULL;
- return vma_next(&kit->data->vmi);
+
+ if (kit->data->locked_vma) {
+ vma_end_read(kit->data->locked_vma);
+ kit->data->locked_vma = NULL;
+ }
+
+ vma = bpf_iter_task_vma_find_next(kit->data);
+ if (!vma)
+ return NULL;
+
+ kit->data->locked_vma = vma;
+ kit->data->next_addr = vma->vm_end;
+ return vma;
}
__bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
@@ -897,7 +952,8 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
struct bpf_iter_task_vma_kern *kit = (void *)it;
if (kit->data) {
- bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
+ if (kit->data->locked_vma)
+ vma_end_read(kit->data->locked_vma);
put_task_struct(kit->data->task);
mmput_async(kit->data->mm);
bpf_mem_free(&bpf_global_ma, kit->data);
--
2.52.0
^ permalink raw reply related [flat|nested] 6+ messages in thread

* [PATCH bpf v4 3/3] bpf: return VMA snapshot from task_vma iterator
2026-03-16 18:57 [PATCH bpf v4 0/3] bpf: fix and improve open-coded task_vma iterator Puranjay Mohan
2026-03-16 18:57 ` [PATCH bpf v4 1/3] bpf: fix mm lifecycle in " Puranjay Mohan
2026-03-16 18:57 ` [PATCH bpf v4 2/3] bpf: switch task_vma iterator from mmap_lock to per-VMA locks Puranjay Mohan
@ 2026-03-16 18:57 ` Puranjay Mohan
2 siblings, 0 replies; 6+ messages in thread
From: Puranjay Mohan @ 2026-03-16 18:57 UTC (permalink / raw)
To: bpf
Cc: Puranjay Mohan, Puranjay Mohan, Alexei Starovoitov,
Andrii Nakryiko, Daniel Borkmann, Martin KaFai Lau,
Eduard Zingerman, Kumar Kartikeya Dwivedi, Mykyta Yatsenko,
kernel-team
Holding the per-VMA lock across the BPF program body creates a lock
ordering problem when helpers acquire locks that depend on mmap_lock:
vm_lock -> i_rwsem -> mmap_lock -> vm_lock
Snapshot the VMA under the per-VMA lock in _next() via memcpy(), then
drop the lock before returning. The BPF program accesses only the
snapshot.
The verifier only trusts vm_mm and vm_file pointers (see
BTF_TYPE_SAFE_TRUSTED_OR_NULL in verifier.c). vm_file is reference-
counted with get_file() under the lock and released via fput() on the
next iteration or in _destroy(). vm_mm is already correct because
lock_vma_under_rcu() verifies vma->vm_mm == mm. All other pointers
are left as-is by memcpy() since the verifier treats them as untrusted.
Fixes: 4ac454682158 ("bpf: Introduce task_vma open-coded iterator kfuncs")
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
---
kernel/bpf/task_iter.c | 42 ++++++++++++++++++++++++++++++------------
1 file changed, 30 insertions(+), 12 deletions(-)
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index ddaf1cf0ecae..5795c5ba2a71 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -799,7 +799,7 @@ const struct bpf_func_proto bpf_find_vma_proto = {
struct bpf_iter_task_vma_kern_data {
struct task_struct *task;
struct mm_struct *mm;
- struct vm_area_struct *locked_vma;
+ struct vm_area_struct snapshot;
u64 next_addr;
};
@@ -833,7 +833,7 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
/*
* Reject irqs-disabled contexts including NMI. Operations used
- * by _next() and _destroy() (vma_end_read, mmput_async)
+ * by _next() and _destroy() (vma_end_read, fput, mmput_async)
* can take spinlocks with IRQs disabled (pi_lock, pool->lock).
* Running from NMI or from a tracepoint that fires with those
* locks held could deadlock.
@@ -863,7 +863,7 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
goto err_cleanup_iter;
}
- kit->data->locked_vma = NULL;
+ kit->data->snapshot.vm_file = NULL;
kit->data->next_addr = addr;
return 0;
@@ -925,26 +925,45 @@ bpf_iter_task_vma_find_next(struct bpf_iter_task_vma_kern_data *data)
return vma;
}
+static void bpf_iter_task_vma_snapshot_reset(struct vm_area_struct *snap)
+{
+ if (snap->vm_file) {
+ fput(snap->vm_file);
+ snap->vm_file = NULL;
+ }
+}
+
__bpf_kfunc struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it)
{
struct bpf_iter_task_vma_kern *kit = (void *)it;
- struct vm_area_struct *vma;
+ struct vm_area_struct *snap, *vma;
if (!kit->data) /* bpf_iter_task_vma_new failed */
return NULL;
- if (kit->data->locked_vma) {
- vma_end_read(kit->data->locked_vma);
- kit->data->locked_vma = NULL;
- }
+ snap = &kit->data->snapshot;
+
+ bpf_iter_task_vma_snapshot_reset(snap);
vma = bpf_iter_task_vma_find_next(kit->data);
if (!vma)
return NULL;
- kit->data->locked_vma = vma;
+ memcpy(snap, vma, sizeof(*snap));
+
+ /*
+ * The verifier only trusts vm_mm and vm_file (see
+ * BTF_TYPE_SAFE_TRUSTED_OR_NULL in verifier.c). Take a reference
+ * on vm_file; vm_mm is already correct because lock_vma_under_rcu()
+ * verifies vma->vm_mm == mm. All other pointers are untrusted by
+ * the verifier and left as-is.
+ */
+ if (snap->vm_file)
+ get_file(snap->vm_file);
+
kit->data->next_addr = vma->vm_end;
- return vma;
+ vma_end_read(vma);
+ return snap;
}
__bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
@@ -952,8 +971,7 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
struct bpf_iter_task_vma_kern *kit = (void *)it;
if (kit->data) {
- if (kit->data->locked_vma)
- vma_end_read(kit->data->locked_vma);
+ bpf_iter_task_vma_snapshot_reset(&kit->data->snapshot);
put_task_struct(kit->data->task);
mmput_async(kit->data->mm);
bpf_mem_free(&bpf_global_ma, kit->data);
--
2.52.0
^ permalink raw reply related [flat|nested] 6+ messages in thread