From: Alistair Popple <apopple@nvidia.com>
To: linux-mm@kvack.org
Cc: zenghui.yu@linux.dev, Liam.Howlett@oracle.com,
akpm@linux-foundation.org, david@kernel.org, jgg@ziepe.ca,
leon@kernel.org, linux-kernel@vger.kernel.org, ljs@kernel.org,
mhocko@suse.com, rppt@kernel.org, surenb@google.com,
vbabka@kernel.org, dri-devel@lists.freedesktop.org,
balbirs@nvidia.com, Alistair Popple <apopple@nvidia.com>
Subject: [PATCH 1/3] lib: test_hmm: evict device pages on file close to avoid use-after-free
Date: Tue, 31 Mar 2026 17:34:43 +1100 [thread overview]
Message-ID: <20260331063445.3551404-2-apopple@nvidia.com> (raw)
In-Reply-To: <20260331063445.3551404-1-apopple@nvidia.com>
When dmirror_fops_release() is called it frees the dmirror struct but
doesn't migrate device private pages back to system memory first. This
leaves those pages with a dangling zone_device_data pointer to the freed
dmirror.
If a subsequent fault occurs on those pages (e.g. during a coredump) the
dmirror_devmem_fault() callback dereferences the stale pointer causing a
kernel panic. This was reported [1] when running mm/ksft_hmm.sh on
arm64, where a test failure triggered SIGABRT and the resulting coredump
walked the VMAs faulting in the stale device private pages.
Fix this by calling dmirror_device_evict_chunk() for each devmem chunk
in dmirror_fops_release() to migrate all device private pages back to
system memory before freeing the dmirror struct. The function is moved
earlier in the file to avoid a forward declaration.
Fixes: b2ef9f5a5cb3 ("mm/hmm/test: add selftest driver for HMM")
Reported-by: Zenghui Yu <zenghui.yu@linux.dev>
Closes: https://lore.kernel.org/linux-mm/8bd0396a-8997-4d2e-a13f-5aac033083d7@linux.dev/
Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
Note that I wasn't able to replicate the exact crash in [1] although I
replicated something similar. So I haven't been able to verify this
fixes the crash conclusively, but it should.
[1] https://lore.kernel.org/linux-mm/8bd0396a-8997-4d2e-a13f-5aac033083d7@linux.dev/
---
lib/test_hmm.c | 112 +++++++++++++++++++++++++++----------------------
1 file changed, 62 insertions(+), 50 deletions(-)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 0964d53365e6..79fe7d233df1 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -185,11 +185,73 @@ static int dmirror_fops_open(struct inode *inode, struct file *filp)
return 0;
}
+static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+{
+ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+ unsigned long npages = end_pfn - start_pfn + 1;
+ unsigned long i;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+ unsigned int order = 0;
+
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+
+ order = folio_order(page_folio(spage));
+ spage = BACKING_PAGE(spage);
+ if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
+ dpage = folio_page(folio_alloc(GFP_HIGHUSER_MOVABLE,
+ order), 0);
+ } else {
+ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+ order = 0;
+ }
+
+ /* TODO Support splitting here */
+ lock_page(dpage);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ if (src_pfns[i] & MIGRATE_PFN_WRITE)
+ dst_pfns[i] |= MIGRATE_PFN_WRITE;
+ if (order)
+ dst_pfns[i] |= MIGRATE_PFN_COMPOUND;
+ folio_copy(page_folio(dpage), page_folio(spage));
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
+}
+
static int dmirror_fops_release(struct inode *inode, struct file *filp)
{
struct dmirror *dmirror = filp->private_data;
+ struct dmirror_device *mdevice = dmirror->mdevice;
+ int i;
mmu_interval_notifier_remove(&dmirror->notifier);
+
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ dmirror_device_evict_chunk(devmem);
+ }
+ }
+
xa_destroy(&dmirror->pt);
kfree(dmirror);
return 0;
@@ -1377,56 +1439,6 @@ static int dmirror_snapshot(struct dmirror *dmirror,
return ret;
}
-static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
-{
- unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
- unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
- unsigned long npages = end_pfn - start_pfn + 1;
- unsigned long i;
- unsigned long *src_pfns;
- unsigned long *dst_pfns;
- unsigned int order = 0;
-
- src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
- dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
-
- migrate_device_range(src_pfns, start_pfn, npages);
- for (i = 0; i < npages; i++) {
- struct page *dpage, *spage;
-
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- if (WARN_ON(!is_device_private_page(spage) &&
- !is_device_coherent_page(spage)))
- continue;
-
- order = folio_order(page_folio(spage));
- spage = BACKING_PAGE(spage);
- if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
- dpage = folio_page(folio_alloc(GFP_HIGHUSER_MOVABLE,
- order), 0);
- } else {
- dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
- order = 0;
- }
-
- /* TODO Support splitting here */
- lock_page(dpage);
- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
- if (src_pfns[i] & MIGRATE_PFN_WRITE)
- dst_pfns[i] |= MIGRATE_PFN_WRITE;
- if (order)
- dst_pfns[i] |= MIGRATE_PFN_COMPOUND;
- folio_copy(page_folio(dpage), page_folio(spage));
- }
- migrate_device_pages(src_pfns, dst_pfns, npages);
- migrate_device_finalize(src_pfns, dst_pfns, npages);
- kvfree(src_pfns);
- kvfree(dst_pfns);
-}
-
/* Removes free pages from the free list so they can't be re-allocated */
static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
{
--
2.53.0
next prev parent reply other threads:[~2026-03-31 6:35 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-31 6:34 [PATCH 0/3] Minor hmm_test fixes and cleanups Alistair Popple
2026-03-31 6:34 ` Alistair Popple [this message]
2026-03-31 8:47 ` [PATCH 1/3] lib: test_hmm: evict device pages on file close to avoid use-after-free Balbir Singh
2026-04-05 4:35 ` Zenghui Yu
2026-04-05 4:47 ` Zenghui Yu
2026-03-31 6:34 ` [PATCH 2/3] selftests/mm: hmm-tests: don't hardcode THP size to 2MB Alistair Popple
2026-03-31 8:51 ` Balbir Singh
2026-04-01 5:19 ` Matthew Brost
2026-04-01 23:01 ` Matthew Brost
2026-04-02 6:32 ` Sayali Patil
2026-03-31 6:34 ` [PATCH 3/3] lib: test_hmm: Implement a device release method Alistair Popple
2026-03-31 8:53 ` Balbir Singh
2026-04-05 4:47 ` Zenghui Yu
2026-04-01 0:33 ` [PATCH 0/3] Minor hmm_test fixes and cleanups Andrew Morton
2026-04-01 1:20 ` Alistair Popple
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260331063445.3551404-2-apopple@nvidia.com \
--to=apopple@nvidia.com \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=balbirs@nvidia.com \
--cc=david@kernel.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=jgg@ziepe.ca \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ljs@kernel.org \
--cc=mhocko@suse.com \
--cc=rppt@kernel.org \
--cc=surenb@google.com \
--cc=vbabka@kernel.org \
--cc=zenghui.yu@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox