From: David Hildenbrand <david@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: linux-doc@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linux-mm@kvack.org, nouveau@lists.freedesktop.org,
	"David Hildenbrand" <david@redhat.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"Jonathan Corbet" <corbet@lwn.net>, "Alex Shi" <alexs@kernel.org>,
	"Yanteng Si" <si.yanteng@linux.dev>,
	"Karol Herbst" <kherbst@redhat.com>,
	"Lyude Paul" <lyude@redhat.com>,
	"Danilo Krummrich" <dakr@kernel.org>,
	"David Airlie" <airlied@gmail.com>,
	"Simona Vetter" <simona@ffwll.ch>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>,
	"Lorenzo Stoakes" <lorenzo.stoakes@oracle.com>,
	"Vlastimil Babka" <vbabka@suse.cz>,
	"Jann Horn" <jannh@google.com>,
	"Pasha Tatashin" <pasha.tatashin@soleen.com>,
	"Peter Xu" <peterx@redhat.com>,
	"Alistair Popple" <apopple@nvidia.com>,
	"Jason Gunthorpe" <jgg@nvidia.com>
Subject: [PATCH v1 08/12] mm/rmap: handle device-exclusive entries correctly in try_to_unmap_one()
Date: Wed, 29 Jan 2025 12:54:06 +0100
Message-ID: <20250129115411.2077152-9-david@redhat.com>
In-Reply-To: <20250129115411.2077152-1-david@redhat.com>

Ever since commit b756a3b5e7ea ("mm: device exclusive memory access")
we can return with a device-exclusive entry from page_vma_mapped_walk().

try_to_unmap_one() is not prepared for that, so teach it about these
non-present non-swap PTEs.
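
Roughly, the distinction try_to_unmap_one() now has to make looks like
this (a condensed sketch only, using the existing swapops.h helpers;
the actual hunk below also derives the pfn for the subpage lookup):

	struct page *page = NULL;
	pte_t pteval = ptep_get(pvmw.pte);

	if (!pte_present(pteval)) {
		swp_entry_t entry = pte_to_swp_entry(pteval);

		/* A non-swap entry that still maps a page, like PROT_NONE. */
		if (is_device_exclusive_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}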

Before that, could we also have triggered this case with device-private
entries? Unlikely.

Note that we can currently only run into this case with
device-exclusive entries on THPs. For order-0 folios, we still adjust
the mapcount on conversion to device-exclusive, making the rmap walk
abort early (folio_mapcount() == 0), thereby breaking swapout. We'll
fix that next, now that try_to_unmap_one() can handle it.
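
For context, a simplified sketch of why the walk does not even start
for such order-0 folios today (the exact reclaim code differs):

	/*
	 * Reclaim only tries to unmap folios that look mapped, and
	 * "mapped" is derived from the mapcount that conversion to
	 * device-exclusive currently adjusts.
	 */
	if (folio_mapped(folio))	/* false once folio_mapcount() == 0 */
		try_to_unmap(folio, TTU_BATCH_FLUSH);
	/* ... otherwise the folio is skipped and swapout never happens */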

Further note that try_to_unmap() calls MMU notifiers and holds the
folio lock, so any device-exclusive users should be properly prepared
for this device-exclusive PTE to "vanish".
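
For reference, the rmap walk is bracketed by MMU notifier calls roughly
like the following (simplified from today's try_to_unmap_one(); the
real code clamps the range to the VMA), which is what gives driver-side
users the chance to stop relying on their exclusive access before the
entry is torn down:

	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, address + folio_size(folio));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/* present PTEs and device-exclusive entries get zapped here */
	}

	mmu_notifier_invalidate_range_end(&range);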

Fixes: b756a3b5e7ea ("mm: device exclusive memory access")
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/rmap.c | 53 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 13 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index 65d9bbea16d0..12900f367a2a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1648,9 +1648,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	bool anon_exclusive, ret = true;
 	pte_t pteval;
 	struct page *subpage;
-	bool anon_exclusive, ret = true;
 	struct mmu_notifier_range range;
 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
 	unsigned long pfn;
@@ -1722,7 +1722,19 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		/* Unexpected PMD-mapped THP? */
 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
 
-		pfn = pte_pfn(ptep_get(pvmw.pte));
+		/*
+		 * We can end up here with selected non-swap entries that
+		 * actually map pages similar to PROT_NONE; see
+		 * page_vma_mapped_walk()->check_pte().
+		 */
+		pteval = ptep_get(pvmw.pte);
+		if (likely(pte_present(pteval))) {
+			pfn = pte_pfn(pteval);
+		} else {
+			pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+		}
+
 		subpage = folio_page(folio, pfn - folio_pfn(folio));
 		address = pvmw.address;
 		anon_exclusive = folio_test_anon(folio) &&
@@ -1778,7 +1790,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				hugetlb_vma_unlock_write(vma);
 			}
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
-		} else {
+			if (pte_dirty(pteval))
+				folio_mark_dirty(folio);
+		} else if (likely(pte_present(pteval))) {
 			flush_cache_page(vma, address, pfn);
 			/* Nuke the page table entry. */
 			if (should_defer_flush(mm, flags)) {
@@ -1796,6 +1810,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			} else {
 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
 			}
+			if (pte_dirty(pteval))
+				folio_mark_dirty(folio);
+		} else {
+			pte_clear(mm, address, pvmw.pte);
 		}
 
 		/*
@@ -1805,10 +1823,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		 */
 		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
 
-		/* Set the dirty flag on the folio now the pte is gone. */
-		if (pte_dirty(pteval))
-			folio_mark_dirty(folio);
-
 		/* Update high watermark before we lower rss */
 		update_hiwater_rss(mm);
 
@@ -1822,8 +1836,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				dec_mm_counter(mm, mm_counter(folio));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
-
-		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
+		} else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
+			   !userfaultfd_armed(vma)) {
 			/*
 			 * The guest indicated that the page content is of no
 			 * interest anymore. Simply discard the pte, vmscan
@@ -1902,6 +1916,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				set_pte_at(mm, address, pvmw.pte, pteval);
 				goto walk_abort;
 			}
+
+			/*
+			 * arch_unmap_one() is expected to be a NOP on
+			 * architectures where we could have non-swp entries
+			 * here, so we'll not check/care.
+			 */
 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
 				swap_free(entry);
 				set_pte_at(mm, address, pvmw.pte, pteval);
@@ -1926,10 +1946,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			swp_pte = swp_entry_to_pte(entry);
 			if (anon_exclusive)
 				swp_pte = pte_swp_mkexclusive(swp_pte);
-			if (pte_soft_dirty(pteval))
-				swp_pte = pte_swp_mksoft_dirty(swp_pte);
-			if (pte_uffd_wp(pteval))
-				swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			if (likely(pte_present(pteval))) {
+				if (pte_soft_dirty(pteval))
+					swp_pte = pte_swp_mksoft_dirty(swp_pte);
+				if (pte_uffd_wp(pteval))
+					swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			} else {
+				if (pte_swp_soft_dirty(pteval))
+					swp_pte = pte_swp_mksoft_dirty(swp_pte);
+				if (pte_swp_uffd_wp(pteval))
+					swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			}
 			set_pte_at(mm, address, pvmw.pte, swp_pte);
 		} else {
 			/*
-- 
2.48.1


Thread overview: 56+ messages
2025-01-29 11:53 [PATCH v1 00/12] mm: fixes for device-exclusive entries (hmm) David Hildenbrand
2025-01-29 11:53 ` [PATCH v1 01/12] mm/gup: reject FOLL_SPLIT_PMD with hugetlb VMAs David Hildenbrand
2025-01-29 21:42   ` John Hubbard
2025-01-30  8:56     ` David Hildenbrand
2025-01-30  5:46   ` Alistair Popple
2025-01-29 11:54 ` [PATCH v1 02/12] mm/rmap: reject hugetlb folios in folio_make_device_exclusive() David Hildenbrand
2025-01-30  5:47   ` Alistair Popple
2025-01-29 11:54 ` [PATCH v1 03/12] mm/rmap: convert make_device_exclusive_range() to make_device_exclusive() David Hildenbrand
2025-01-30  5:57   ` Alistair Popple
2025-01-30  9:04     ` David Hildenbrand
2025-01-31  0:28     ` Alistair Popple
2025-01-31  9:29       ` David Hildenbrand
2025-01-30 13:46   ` Simona Vetter
2025-01-30 15:56     ` David Hildenbrand
2025-01-29 11:54 ` [PATCH v1 04/12] mm/rmap: implement make_device_exclusive() using folio_walk instead of rmap walk David Hildenbrand
2025-01-30  6:11   ` Alistair Popple
2025-01-30  9:01     ` David Hildenbrand
2025-01-30  9:12       ` David Hildenbrand
2025-01-30  9:24       ` David Hildenbrand
2025-01-30 22:31         ` Alistair Popple
2025-02-04 10:56           ` David Hildenbrand
2025-01-30  9:40     ` Simona Vetter
2025-01-30  9:47       ` David Hildenbrand
2025-01-30 13:00         ` Simona Vetter
2025-01-30 15:59           ` David Hildenbrand
2025-01-31 17:00             ` Simona Vetter
2025-01-29 11:54 ` [PATCH v1 05/12] mm/memory: detect writability in restore_exclusive_pte() through can_change_pte_writable() David Hildenbrand
2025-01-30  9:51   ` Simona Vetter
2025-01-30  9:58     ` David Hildenbrand
2025-01-30 13:03       ` Simona Vetter
2025-01-30 23:06         ` Alistair Popple
2025-01-31 10:55           ` David Hildenbrand
2025-01-31 17:05             ` Simona Vetter
2025-02-04 10:58               ` David Hildenbrand
2025-01-29 11:54 ` [PATCH v1 06/12] mm: use single SWP_DEVICE_EXCLUSIVE entry type David Hildenbrand
2025-01-30 13:43   ` Simona Vetter
2025-01-30 23:28   ` Alistair Popple
2025-01-29 11:54 ` [PATCH v1 07/12] mm/page_vma_mapped: device-private entries are not migration entries David Hildenbrand
2025-01-30 23:36   ` Alistair Popple
2025-01-31 11:06     ` David Hildenbrand
2025-01-29 11:54 ` David Hildenbrand [this message]
2025-01-30 10:10   ` [PATCH v1 08/12] mm/rmap: handle device-exclusive entries correctly in try_to_unmap_one() Simona Vetter
2025-01-30 11:08     ` David Hildenbrand
2025-01-30 13:06       ` Simona Vetter
2025-01-30 14:08         ` Jason Gunthorpe
2025-01-30 16:10           ` Simona Vetter
2025-01-30 15:52         ` David Hildenbrand
2025-01-29 11:54 ` [PATCH v1 09/12] mm/rmap: handle device-exclusive entries correctly in try_to_migrate_one() David Hildenbrand
2025-01-29 11:54 ` [PATCH v1 10/12] mm/rmap: handle device-exclusive entries correctly in folio_referenced_one() David Hildenbrand
2025-01-29 11:54 ` [PATCH v1 11/12] mm/rmap: handle device-exclusive entries correctly in page_vma_mkclean_one() David Hildenbrand
2025-01-29 11:54 ` [PATCH v1 12/12] mm/rmap: keep mapcount untouched for device-exclusive entries David Hildenbrand
2025-01-30 10:37   ` Simona Vetter
2025-01-30 11:42     ` David Hildenbrand
2025-01-30 13:19       ` Simona Vetter
2025-01-30 15:43         ` David Hildenbrand
2025-01-31 17:13           ` Simona Vetter
