From: Anshuman Khandual <anshuman.khandual@arm.com>
To: linux-arm-kernel@lists.infradead.org
Cc: Anshuman Khandual <anshuman.khandual@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>,
Ryan Roberts <ryan.roberts@arm.com>,
Mark Rutland <mark.rutland@arm.com>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Mike Rapoport <rppt@kernel.org>,
Linu Cherian <linu.cherian@arm.com>,
Usama Arif <usama.arif@linux.dev>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC V2 09/14] arm64/mm: Route all pgtable atomics to central helpers
Date: Wed, 13 May 2026 10:15:42 +0530 [thread overview]
Message-ID: <20260513044547.4128549-10-anshuman.khandual@arm.com> (raw)
In-Reply-To: <20260513044547.4128549-1-anshuman.khandual@arm.com>
Route all cmpxchg() operations performed on various page table entries to a
new pxxval_cmpxchg_relaxed() helper. Similarly route all xchg() operations
performed on page table entries to a new pxxval_xchg_relaxed() helper.
Currently these helpers just forward to the same APIs that were previously
called directly, but in the future we will change the routing for D128, whose
128-bit entries are too wide to use the standard APIs.
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
Changes in RFC V2:
- Renamed all ptdesc_ instances as pxxval_ instead
arch/arm64/include/asm/pgtable.h | 23 +++++++++++++++++------
arch/arm64/mm/fault.c | 2 +-
2 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c71bb829e9f1..f876200d383e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -87,6 +87,17 @@ static inline void arch_leave_lazy_mmu_mode(void)
#define pxxval_get(x) READ_ONCE(x)
#define pxxval_set(x, val) WRITE_ONCE(x, val)
+static inline ptdesc_t pxxval_cmpxchg_relaxed(ptdesc_t *ptep, ptdesc_t old,
+ ptdesc_t new)
+{
+ return cmpxchg_relaxed(ptep, old, new);
+}
+
+static inline ptdesc_t pxxval_xchg_relaxed(ptdesc_t *ptep, ptdesc_t new)
+{
+ return xchg_relaxed(ptep, new);
+}
+
#define pmdp_get pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
@@ -1340,8 +1351,8 @@ static inline bool __ptep_test_and_clear_young(struct vm_area_struct *vma,
do {
old_pte = pte;
pte = pte_mkold(pte);
- pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
- pte_val(old_pte), pte_val(pte));
+ pte_val(pte) = pxxval_cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(old_pte), pte_val(pte));
} while (pte_val(pte) != pte_val(old_pte));
return pte_young(pte);
@@ -1383,7 +1394,7 @@ static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
pte_t *ptep,
unsigned long pgsize)
{
- pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
+ pte_t pte = __pte(pxxval_xchg_relaxed(&pte_val(*ptep), 0));
switch (pgsize) {
case PAGE_SIZE:
@@ -1459,7 +1470,7 @@ static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
do {
old_pte = pte;
pte = pte_wrprotect(pte);
- pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(pte) = pxxval_cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
} while (pte_val(pte) != pte_val(old_pte));
}
@@ -1497,7 +1508,7 @@ static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
if (flags & CYDP_CLEAR_DIRTY)
pte = pte_mkclean(pte);
- pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(pte) = pxxval_cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
} while (pte_val(pte) != pte_val(old_pte));
}
@@ -1536,7 +1547,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
- return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
+ return __pmd(pxxval_xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5c61f39f7f29..bce191d16090 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -232,7 +232,7 @@ int __ptep_set_access_flags_anysz(struct vm_area_struct *vma,
pteval ^= PTE_RDONLY;
pteval |= pte_val(entry);
pteval ^= PTE_RDONLY;
- pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
+ pteval = pxxval_cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);
/*
--
2.43.0
next prev parent reply other threads:[~2026-05-13 4:47 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-13 4:45 [RFC V2 00/14] arm64/mm: Enable 128 bit page table entries Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 01/14] mm: Abstract printing of pxd_val() Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 02/14] mm: Add read-write accessors for vm_page_prot Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 03/14] arm64/mm: Convert READ_ONCE() as pmdp_get() while accessing PMD Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 04/14] arm64/mm: Convert READ_ONCE() as pudp_get() while accessing PUD Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 05/14] arm64/mm: Convert READ_ONCE() as p4dp_get() while accessing P4D Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 06/14] arm64/mm: Convert READ_ONCE() as pgdp_get() while accessing PGD Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 07/14] arm64/mm: Route all pgtable reads via pxxval_get() Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 08/14] arm64/mm: Route all pgtable writes via pxxval_set() Anshuman Khandual
2026-05-13 4:45 ` Anshuman Khandual [this message]
2026-05-13 4:45 ` [RFC V2 10/14] arm64/mm: Abstract printing of pxd_val() Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 11/14] arm64/mm: Override read-write accessors for vm_page_prot Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 13/14] arm64/mm: Add an abstraction level for tlbi_op Anshuman Khandual
2026-05-13 4:45 ` [RFC V2 14/14] arm64/mm: Add initial support for FEAT_D128 page tables Anshuman Khandual
2026-05-13 9:39 ` [RFC V2 00/14] arm64/mm: Enable 128 bit page table entries Lorenzo Stoakes
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260513044547.4128549-10-anshuman.khandual@arm.com \
--to=anshuman.khandual@arm.com \
--cc=akpm@linux-foundation.org \
--cc=catalin.marinas@arm.com \
--cc=david@kernel.org \
--cc=linu.cherian@arm.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mark.rutland@arm.com \
--cc=rppt@kernel.org \
--cc=ryan.roberts@arm.com \
--cc=usama.arif@linux.dev \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox