From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: himal.prasad.ghimiray@intel.com, apopple@nvidia.com,
airlied@gmail.com, thomas.hellstrom@linux.intel.com,
simona.vetter@ffwll.ch, felix.kuehling@amd.com, dakr@kernel.org
Subject: [PATCH v5 03/32] mm/migrate: Trylock device page in do_swap_page
Date: Wed, 12 Feb 2025 18:10:43 -0800
Message-ID: <20250213021112.1228481-4-matthew.brost@intel.com>
In-Reply-To: <20250213021112.1228481-1-matthew.brost@intel.com>
Avoid multiple CPU page faults to the same device page racing by trying
to lock the page in do_swap_page before taking an extra reference to it.
This prevents scenarios where several CPU page faults each take an extra
reference to a device page, which could abort migration in
folio_migrate_mapping. With the device page locked in do_swap_page, the
migrate_vma_* functions need to be updated to avoid locking the
fault_page argument.
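For reference, the abort in question comes from the reference-count
check in folio_migrate_mapping; paraphrased and simplified (this snippet
is illustrative, not part of this patch):

	/* Simplified from __folio_migrate_mapping() in mm/migrate.c */
	if (!mapping) {
		/*
		 * Anonymous folio (device-private pages included): any
		 * reference beyond expected_count, e.g. one taken by a
		 * racing CPU fault, aborts the migration.
		 */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;
	}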
Prior to this change, a livelock could occur in the SVM implementation
of Xe (the Intel GPU DRM driver) if enough threads faulted the same
device page.
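With this change, a driver's migrate_to_ram() callback is entered with
vmf->page already locked and must pass it as fault_page so the
migrate_vma_* functions skip locking and unlocking it. A minimal sketch
of the resulting calling convention follows; the callback name, the
pgmap_owner value, the copy step, and the error handling are
illustrative placeholders, not code from this patch:

	#include <linux/migrate.h>
	#include <linux/mm.h>

	static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
	{
		unsigned long src_pfn = 0, dst_pfn = 0;
		struct migrate_vma migrate = {
			.vma		= vmf->vma,
			.start		= vmf->address,
			.end		= vmf->address + PAGE_SIZE,
			.src		= &src_pfn,
			.dst		= &dst_pfn,
			.pgmap_owner	= my_pgmap_owner,	/* placeholder */
			.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
			.fault_page	= vmf->page,	/* locked by do_swap_page */
		};

		/* Fails with -EINVAL if fault_page is not locked (check added below). */
		if (migrate_vma_setup(&migrate))
			return VM_FAULT_SIGBUS;
		if (!migrate.cpages)
			return 0;

		/* ... allocate a system page, copy back device data, set dst_pfn ... */

		migrate_vma_pages(&migrate);
		/* Leaves fault_page locked; do_swap_page unlocks it on return. */
		migrate_vma_finalize(&migrate);

		return 0;
	}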
v3:
- Put page after unlocking page (Alistair)
- Warn on splitting a THP which is the fault page (Alistair)
- Warn on dst page == fault page (Alistair)
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Philip Yang <Philip.Yang@amd.com>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Suggested-by: Simona Vetter <simona.vetter@ffwll.ch>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
mm/memory.c | 13 ++++++---
mm/migrate_device.c | 64 ++++++++++++++++++++++++++++++++-------------
2 files changed, 55 insertions(+), 22 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 539c0f7c6d54..1e010c5d67bc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4337,10 +4337,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* Get a page reference while we know the page can't be
* freed.
*/
- get_page(vmf->page);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
- put_page(vmf->page);
+ if (trylock_page(vmf->page)) {
+ get_page(vmf->page);
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
+ unlock_page(vmf->page);
+ put_page(vmf->page);
+ } else {
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ }
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else if (is_pte_marker_entry(entry)) {
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 19960743f927..3470357d9bae 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -60,6 +60,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
struct mm_walk *walk)
{
struct migrate_vma *migrate = walk->private;
+ struct folio *fault_folio = migrate->fault_page ?
+ page_folio(migrate->fault_page) : NULL;
struct vm_area_struct *vma = walk->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start, unmapped = 0;
@@ -88,11 +90,16 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
folio_get(folio);
spin_unlock(ptl);
+ /* FIXME support THP */
+ if (WARN_ON_ONCE(fault_folio == folio))
+ return migrate_vma_collect_skip(start, end,
+ walk);
if (unlikely(!folio_trylock(folio)))
return migrate_vma_collect_skip(start, end,
walk);
ret = split_folio(folio);
- folio_unlock(folio);
+ if (fault_folio != folio)
+ folio_unlock(folio);
folio_put(folio);
if (ret)
return migrate_vma_collect_skip(start, end,
@@ -192,7 +199,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
* optimisation to avoid walking the rmap later with
* try_to_migrate().
*/
- if (folio_trylock(folio)) {
+ if (fault_folio == folio || folio_trylock(folio)) {
bool anon_exclusive;
pte_t swp_pte;
@@ -204,7 +211,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
if (folio_try_share_anon_rmap_pte(folio, page)) {
set_pte_at(mm, addr, ptep, pte);
- folio_unlock(folio);
+ if (fault_folio != folio)
+ folio_unlock(folio);
folio_put(folio);
mpfn = 0;
goto next;
@@ -363,6 +371,8 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
unsigned long npages,
struct page *fault_page)
{
+ struct folio *fault_folio = fault_page ?
+ page_folio(fault_page) : NULL;
unsigned long i, restore = 0;
bool allow_drain = true;
unsigned long unmapped = 0;
@@ -427,7 +437,8 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
remove_migration_ptes(folio, folio, 0);
src_pfns[i] = 0;
- folio_unlock(folio);
+ if (fault_folio != folio)
+ folio_unlock(folio);
folio_put(folio);
restore--;
}
@@ -536,6 +547,8 @@ int migrate_vma_setup(struct migrate_vma *args)
return -EINVAL;
if (args->fault_page && !is_device_private_page(args->fault_page))
return -EINVAL;
+ if (args->fault_page && !PageLocked(args->fault_page))
+ return -EINVAL;
memset(args->src, 0, sizeof(*args->src) * nr_pages);
args->cpages = 0;
@@ -799,19 +812,13 @@ void migrate_vma_pages(struct migrate_vma *migrate)
}
EXPORT_SYMBOL(migrate_vma_pages);
-/*
- * migrate_device_finalize() - complete page migration
- * @src_pfns: src_pfns returned from migrate_device_range()
- * @dst_pfns: array of pfns allocated by the driver to migrate memory to
- * @npages: number of pages in the range
- *
- * Completes migration of the page by removing special migration entries.
- * Drivers must ensure copying of page data is complete and visible to the CPU
- * before calling this.
- */
-void migrate_device_finalize(unsigned long *src_pfns,
- unsigned long *dst_pfns, unsigned long npages)
+static void __migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns,
+ unsigned long npages,
+ struct page *fault_page)
{
+ struct folio *fault_folio = fault_page ?
+ page_folio(fault_page) : NULL;
unsigned long i;
for (i = 0; i < npages; i++) {
@@ -824,6 +831,7 @@ void migrate_device_finalize(unsigned long *src_pfns,
if (!page) {
if (dst) {
+ WARN_ON_ONCE(fault_folio == dst);
folio_unlock(dst);
folio_put(dst);
}
@@ -834,6 +842,7 @@ void migrate_device_finalize(unsigned long *src_pfns,
if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
if (dst) {
+ WARN_ON_ONCE(fault_folio == dst);
folio_unlock(dst);
folio_put(dst);
}
@@ -841,7 +850,8 @@ void migrate_device_finalize(unsigned long *src_pfns,
}
remove_migration_ptes(src, dst, 0);
- folio_unlock(src);
+ if (fault_folio != src)
+ folio_unlock(src);
if (folio_is_zone_device(src))
folio_put(src);
@@ -849,6 +859,7 @@ void migrate_device_finalize(unsigned long *src_pfns,
folio_putback_lru(src);
if (dst != src) {
+ WARN_ON_ONCE(fault_folio == dst);
folio_unlock(dst);
if (folio_is_zone_device(dst))
folio_put(dst);
@@ -857,6 +868,22 @@ void migrate_device_finalize(unsigned long *src_pfns,
}
}
}
+
+/*
+ * migrate_device_finalize() - complete page migration
+ * @src_pfns: src_pfns returned from migrate_device_range()
+ * @dst_pfns: array of pfns allocated by the driver to migrate memory to
+ * @npages: number of pages in the range
+ *
+ * Completes migration of the page by removing special migration entries.
+ * Drivers must ensure copying of page data is complete and visible to the CPU
+ * before calling this.
+ */
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages)
+{
+ return __migrate_device_finalize(src_pfns, dst_pfns, npages, NULL);
+}
EXPORT_SYMBOL(migrate_device_finalize);
/**
@@ -872,7 +899,8 @@ EXPORT_SYMBOL(migrate_device_finalize);
*/
void migrate_vma_finalize(struct migrate_vma *migrate)
{
- migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
+ __migrate_device_finalize(migrate->src, migrate->dst, migrate->npages,
+ migrate->fault_page);
}
EXPORT_SYMBOL(migrate_vma_finalize);
--
2.34.1