* [PATCH v3 1/3] powerpc: Add common pud_pfn stub for all platforms
@ 2022-10-24 0:35 Rohan McLure
2022-10-24 0:35 ` [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers Rohan McLure
` (2 more replies)
0 siblings, 3 replies; 9+ messages in thread
From: Rohan McLure @ 2022-10-24 0:35 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Rohan McLure

Prior to this commit, pud_pfn was implemented with BUILD_BUG as the inline
function for 64-bit Book3S systems, but the call is never emitted, as its
invocations in generic code are guarded by calls to pud_devmap, which
returns zero on such systems. A future patch will provide support for page
table checks, the generic code for which depends on a pud_pfn stub being
implemented, even though the checks will not interact with puds directly.

Remove the 64-bit Book3S stub and instead define pud_pfn to warn on all
platforms. pud_pfn may be defined properly on a per-platform basis should
it grow real users in future.
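
To illustrate, the generic callers follow this guard pattern (a
simplified sketch, not lifted verbatim from the generic mm code):

        /*
         * pud_devmap() is constant zero on these platforms, so the
         * branch folds away and pud_pfn() is never instantiated.
         */
        if (pud_devmap(*pud))
                pfn = pud_pfn(*pud);

which is why the BUILD_BUG() was never actually hit.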
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
V2: Remove conditional BUILD_BUG and BUG. Instead warn on usage.
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 10 ----------
 arch/powerpc/include/asm/pgtable.h           | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 486902aff040..f9aefa492df0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1413,16 +1413,6 @@ static inline int pgd_devmap(pgd_t pgd)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pud_pfn(pud_t pud)
-{
- /*
- * Currently all calls to pud_pfn() are gated around a pud_devmap()
- * check so this should never be used. If it grows another user we
- * want to know about it.
- */
- BUILD_BUG();
- return 0;
-}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 33f4bf8d22b0..36956fb440e1 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -158,6 +158,20 @@ struct seq_file;
void arch_report_meminfo(struct seq_file *m);
#endif /* CONFIG_PPC64 */
+/*
+ * Currently only consumed by page_table_check_pud_{set,clear}. Since clears
+ * and sets to page table entries at any level are done through
+ * page_table_check_pte_{set,clear}, provide stub implementation.
+ */
+#ifndef pud_pfn
+#define pud_pfn pud_pfn
+static inline int pud_pfn(pud_t pud)
+{
+ WARN(1, "pud: platform does not use pud entries directly");
+ return 0;
+}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
--
2.34.1
^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers
  2022-10-24 0:35 [PATCH v3 1/3] powerpc: Add common pud_pfn stub for all platforms Rohan McLure
@ 2022-10-24 0:35 ` Rohan McLure
  2022-11-03 8:02   ` Christophe Leroy
  2022-11-03 8:41   ` Christophe Leroy
  2022-10-24 0:35 ` [PATCH v3 3/3] powerpc: mm: support page table check Rohan McLure
  2022-11-03 7:56 ` [PATCH v3 1/3] powerpc: Add common pud_pfn stub for all platforms Christophe Leroy
  2 siblings, 2 replies; 9+ messages in thread
From: Rohan McLure @ 2022-10-24 0:35 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Rohan McLure

Add the following helpers for detecting whether a page table entry
is a leaf and is accessible to user space.

 * pte_user_accessible_page
 * pmd_user_accessible_page
 * pud_user_accessible_page

Also implement missing pud_user definitions for both Book3S/nohash 64-bit
systems, and pmd_user for Book3S/nohash 32-bit systems.

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
V2: Provide missing pud_user implementations, use p{u,m}d_is_leaf.
V3: Provide missing pmd_user implementations as stubs in 32-bit.
---
 arch/powerpc/include/asm/book3s/32/pgtable.h |  4 ++++
 arch/powerpc/include/asm/book3s/64/pgtable.h | 10 ++++++++++
 arch/powerpc/include/asm/nohash/32/pgtable.h |  1 +
 arch/powerpc/include/asm/nohash/64/pgtable.h | 10 ++++++++++
 arch/powerpc/include/asm/pgtable.h           | 15 +++++++++++++++
 5 files changed, 40 insertions(+)

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 40041ac713d9..8bf1c538839a 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -531,6 +531,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 }
 
+static inline bool pmd_user(pmd_t pmd)
+{
+	return 0;
+}
 
 
 /* This low level function performs the actual PTE insertion
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index f9aefa492df0..3083111f9d0a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -621,6 +621,16 @@ static inline bool pte_user(pte_t pte)
 	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
 }
 
+static inline bool pmd_user(pmd_t pmd)
+{
+	return !(pmd_raw(pmd) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+static inline bool pud_user(pud_t pud)
+{
+	return !(pud_raw(pud) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
 #define pte_access_permitted pte_access_permitted
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 9091e4904a6b..b92044d9d778 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -354,6 +354,7 @@ static inline int pte_young(pte_t pte)
 #endif
 
 #define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
+#define pmd_user(pmd)		0
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 599921cc257e..23c5135178d1 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -123,6 +123,11 @@ static inline pte_t pmd_pte(pmd_t pmd)
 	return __pte(pmd_val(pmd));
 }
 
+static inline bool pmd_user(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _PAGE_USER) == _PAGE_USER;
+}
+
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
 				 || (pmd_val(pmd) & PMD_BAD_BITS))
@@ -158,6 +163,11 @@ static inline pte_t pud_pte(pud_t pud)
 	return __pte(pud_val(pud));
 }
 
+static inline bool pud_user(pud_t pud)
+{
+	return (pud_val(pud) & _PAGE_USER) == _PAGE_USER;
+}
+
 static inline pud_t pte_pud(pte_t pte)
 {
 	return __pud(pte_val(pte));
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 36956fb440e1..3cb5de9f1aa4 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -172,6 +172,21 @@ static inline int pud_pfn(pud_t pud)
 }
 #endif
 
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+	return pte_present(pte) && pte_user(pte);
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+	return pmd_is_leaf(pmd) && pmd_present(pmd) && pmd_user(pmd);
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+	return pud_is_leaf(pud) && pud_present(pud) && pud_user(pud);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
-- 
2.34.1

^ permalink raw reply related	[flat|nested] 9+ messages in thread
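
For context, the generic page table check code that consumes these
helpers looks approximately like the following sketch (simplified from
mm/page_table_check.c of that era; exact signatures may differ across
kernel versions):

	void __page_table_check_pte_clear(struct mm_struct *mm,
					  unsigned long addr, pte_t pte)
	{
		if (&init_mm == mm)
			return;

		/* Only entries mapping user-accessible pages are tracked */
		if (pte_user_accessible_page(pte))
			page_table_check_clear(mm, addr, pte_pfn(pte),
					       PAGE_SIZE >> PAGE_SHIFT);
	}

The pmd/pud variants are analogous, which is why each level needs a
present/leaf/user predicate even on platforms that never map huge
entries at that level.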
* Re: [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers
  2022-10-24 0:35 ` [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers Rohan McLure
@ 2022-11-03 8:02   ` Christophe Leroy
  2022-11-06 22:47     ` Rohan McLure
  2022-11-03 8:41   ` Christophe Leroy
  1 sibling, 1 reply; 9+ messages in thread
From: Christophe Leroy @ 2022-11-03 8:02 UTC (permalink / raw)
To: Rohan McLure, linuxppc-dev@lists.ozlabs.org

Le 24/10/2022 à 02:35, Rohan McLure a écrit :
> Add the following helpers for detecting whether a page table entry
> is a leaf and is accessible to user space.
>
> * pte_user_accessible_page
> * pmd_user_accessible_page
> * pud_user_accessible_page
>
> Also implement missing pud_user definitions for both Book3S/nohash 64-bit
> systems, and pmd_user for Book3S/nohash 32-bit systems.
>
> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
> [...]
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index 36956fb440e1..3cb5de9f1aa4 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -172,6 +172,21 @@ static inline int pud_pfn(pud_t pud)
>   }
>   #endif
>
> +static inline bool pte_user_accessible_page(pte_t pte)
> +{
> +	return pte_present(pte) && pte_user(pte);
> +}
> +
> +static inline bool pmd_user_accessible_page(pmd_t pmd)
> +{
> +	return pmd_is_leaf(pmd) && pmd_present(pmd) && pmd_user(pmd);

pmd_is_leaf() is specific to powerpc and we may want to get rid of it.

Can you use pmd_leaf() instead ?

> +}
> +
> +static inline bool pud_user_accessible_page(pud_t pud)
> +{
> +	return pud_is_leaf(pud) && pud_present(pud) && pud_user(pud);

pud_is_leaf() is specific to powerpc and we may want to get rid of it.

Can you use pud_leaf() instead ?

> +}
> +
>   #endif /* __ASSEMBLY__ */
>
>   #endif /* _ASM_POWERPC_PGTABLE_H */

^ permalink raw reply	[flat|nested] 9+ messages in thread
* Re: [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers
  2022-11-03 8:02   ` Christophe Leroy
@ 2022-11-06 22:47     ` Rohan McLure
  0 siblings, 0 replies; 9+ messages in thread
From: Rohan McLure @ 2022-11-06 22:47 UTC (permalink / raw)
To: Christophe Leroy; +Cc: linuxppc-dev@lists.ozlabs.org

> On 3 Nov 2022, at 7:02 pm, Christophe Leroy <christophe.leroy@csgroup.eu> wrote:
>
> Le 24/10/2022 à 02:35, Rohan McLure a écrit :
>> [...]
>> +static inline bool pmd_user_accessible_page(pmd_t pmd)
>> +{
>> +	return pmd_is_leaf(pmd) && pmd_present(pmd) && pmd_user(pmd);
>
> pmd_is_leaf() is specific to powerpc and we may want to get rid of it.
>
> Can you use pmd_leaf() instead ?
>
>> +}
>> +
>> +static inline bool pud_user_accessible_page(pud_t pud)
>> +{
>> +	return pud_is_leaf(pud) && pud_present(pud) && pud_user(pud);
>
> pud_is_leaf() is specific to powerpc and we may want to get rid of it.
>
> Can you use pud_leaf() instead ?

Going to resend, replacing all usages/definitions of p{m,u,4}d_is_leaf()
with p{m,u,4}d_leaf() in arch/powerpc prior to this patch.

^ permalink raw reply	[flat|nested] 9+ messages in thread
* Re: [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers
  2022-10-24 0:35 ` [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers Rohan McLure
  2022-11-03 8:02   ` Christophe Leroy
@ 2022-11-03 8:41   ` Christophe Leroy
  1 sibling, 0 replies; 9+ messages in thread
From: Christophe Leroy @ 2022-11-03 8:41 UTC (permalink / raw)
To: Rohan McLure, linuxppc-dev@lists.ozlabs.org

Le 24/10/2022 à 02:35, Rohan McLure a écrit :
> [...]
> diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
> index 9091e4904a6b..b92044d9d778 100644
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@ -354,6 +354,7 @@ static inline int pte_young(pte_t pte)
>   #endif
>
>   #define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
> +#define pmd_user(pmd)		0

Can it be a static inline like for book3s/32 instead ?

>   /*
>    * Encode and decode a swap entry.
>    * Note that the bits we use in a PTE for representing a swap entry
> [...]

^ permalink raw reply	[flat|nested] 9+ messages in thread
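
The change being requested would look something like this (sketch,
mirroring the book3s/32 stub added earlier in the same patch):

	static inline bool pmd_user(pmd_t pmd)
	{
		return false;
	}

Unlike the bare #define, a static inline type-checks its argument and
always evaluates it, avoiding unused-variable warnings at call sites.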
* [PATCH v3 3/3] powerpc: mm: support page table check
  2022-10-24 0:35 [PATCH v3 1/3] powerpc: Add common pud_pfn stub for all platforms Rohan McLure
  2022-10-24 0:35 ` [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers Rohan McLure
@ 2022-10-24 0:35 ` Rohan McLure
  2022-10-26 3:21   ` Russell Currey
  2022-11-03 8:43   ` Christophe Leroy
  2022-11-03 7:56 ` [PATCH v3 1/3] powerpc: Add common pud_pfn stub for all platforms Christophe Leroy
  2 siblings, 2 replies; 9+ messages in thread
From: Rohan McLure @ 2022-10-24 0:35 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Rohan McLure

On creation and clearing of a page table mapping, instrument such calls
by invoking page_table_check_pte_set and page_table_check_pte_clear
respectively. These calls serve as a sanity check against illegal
mappings.

Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit
platforms implementing Book3S.

Change pud_pfn to be a runtime bug rather than a build bug as it is
consumed by page_table_check_pud_{clear,set} which are not called.

See also:

riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
arm64 in commit 42b2547137f5 ("arm64/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
check")

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
V2: Update spacing and types assigned to pte_update calls.
V3: Update one last pte_update call to remove __pte invocation.
---
 arch/powerpc/Kconfig                         |  1 +
 arch/powerpc/include/asm/book3s/32/pgtable.h |  9 ++++++++-
 arch/powerpc/include/asm/book3s/64/pgtable.h | 18 +++++++++++++++---
 arch/powerpc/include/asm/nohash/32/pgtable.h |  7 ++++++-
 arch/powerpc/include/asm/nohash/64/pgtable.h |  8 ++++++--
 arch/powerpc/include/asm/nohash/pgtable.h    |  1 +
 6 files changed, 37 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4c466acdc70d..6c213ac46a92 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -149,6 +149,7 @@ config PPC
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC	if PPC_BOOK3S || PPC_8xx || 40x
+	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF		if PPC64
 	select ARCH_USE_MEMTEST
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 8bf1c538839a..6a592426b935 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -53,6 +53,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/page_table_check.h>
+
 static inline bool pte_user(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_USER;
@@ -353,7 +355,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
 {
-	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -545,6 +551,7 @@ static inline bool pmd_user(pmd_t pmd)
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
+	page_table_check_pte_set(mm, addr, ptep, pte);
 #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
 	 * helper pte_update() which does an atomic update. We need to do that
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 3083111f9d0a..b5c5718d9b90 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -181,6 +181,8 @@
 #define PAGE_AGP		(PAGE_KERNEL_NC)
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
 /*
  * page table defines
  */
@@ -484,8 +486,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
-	return __pte(old);
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
@@ -494,11 +499,16 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 					    pte_t *ptep, int full)
 {
 	if (full && radix_enabled()) {
+		pte_t old_pte;
+
 		/*
 		 * We know that this is a full mm pte clear and
 		 * hence can be sure there is no parallel set_pte.
 		 */
-		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+		old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+		page_table_check_pte_clear(mm, addr, old_pte);
+
+		return old_pte;
 	}
 	return ptep_get_and_clear(mm, addr, ptep);
 }
@@ -884,6 +894,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
 
+	page_table_check_pte_set(mm, addr, ptep, pte);
+
 	if (radix_enabled())
 		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
 	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index b92044d9d778..61e96f82044a 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -166,6 +166,7 @@ void unmap_kernel_page(unsigned long va);
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
 
 #define pte_clear(mm, addr, ptep) \
 	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
@@ -305,7 +306,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
 {
-	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 23c5135178d1..fedcdf2a959d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -83,6 +83,7 @@
 #define H_PAGE_4K_PFN	0
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
 /* pte_clear moved to later in this file */
 
 static inline pte_t pte_mkwrite(pte_t pte)
@@ -253,8 +254,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
-	return __pte(old);
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index b499da6c1a99..62b221b7cccf 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -185,6 +185,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
+	page_table_check_pte_set(mm, addr, ptep, pte);
 	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between.
-- 
2.34.1

^ permalink raw reply related	[flat|nested] 9+ messages in thread
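
For anyone wanting to exercise the checker once this series lands: per
Documentation/mm/page_table_check.rst, build with

	CONFIG_PAGE_TABLE_CHECK=y

and boot with the page_table_check=on command line parameter, or select
CONFIG_PAGE_TABLE_CHECK_ENFORCED=y to enable it unconditionally. The
checker BUGs when it detects an illegal page table entry transition.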
* Re: [PATCH v3 3/3] powerpc: mm: support page table check
  2022-10-24 0:35 ` [PATCH v3 3/3] powerpc: mm: support page table check Rohan McLure
@ 2022-10-26 3:21   ` Russell Currey
  2022-11-03 8:43   ` Christophe Leroy
  1 sibling, 0 replies; 9+ messages in thread
From: Russell Currey @ 2022-10-26 3:21 UTC (permalink / raw)
To: Rohan McLure, linuxppc-dev

On Mon, 2022-10-24 at 11:35 +1100, Rohan McLure wrote:
> On creation and clearing of a page table mapping, instrument such
> calls
> by invoking page_table_check_pte_set and page_table_check_pte_clear
> respectively. These calls serve as a sanity check against illegal
> mappings.
>
> Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit
> platforms implementing Book3S.
>
> Change pud_pfn to be a runtime bug rather than a build bug as it is
> consumed by page_table_check_pud_{clear,set} which are not called.
>
> See also:
>
> riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> arm64 in commit 42b2547137f5 ("arm64/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page
> table
> check")
>
> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>

Reviewed-by: Russell Currey <ruscur@russell.cc>

^ permalink raw reply	[flat|nested] 9+ messages in thread
* Re: [PATCH v3 3/3] powerpc: mm: support page table check
  2022-10-24 0:35 ` [PATCH v3 3/3] powerpc: mm: support page table check Rohan McLure
  2022-10-26 3:21   ` Russell Currey
@ 2022-11-03 8:43   ` Christophe Leroy
  1 sibling, 0 replies; 9+ messages in thread
From: Christophe Leroy @ 2022-11-03 8:43 UTC (permalink / raw)
To: Rohan McLure, linuxppc-dev@lists.ozlabs.org

Le 24/10/2022 à 02:35, Rohan McLure a écrit :
> On creation and clearing of a page table mapping, instrument such calls
> by invoking page_table_check_pte_set and page_table_check_pte_clear
> respectively. These calls serve as a sanity check against illegal
> mappings.
>
> Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit
> platforms implementing Book3S.
>
> Change pud_pfn to be a runtime bug rather than a build bug as it is
> consumed by page_table_check_pud_{clear,set} which are not called.
>
> See also:
>
> riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> arm64 in commit 42b2547137f5 ("arm64/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
> check")
>
> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>

Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>

> [...]

^ permalink raw reply	[flat|nested] 9+ messages in thread
* Re: [PATCH v3 1/3] powerpc: Add common pud_pfn stub for all platforms
  2022-10-24 0:35 [PATCH v3 1/3] powerpc: Add common pud_pfn stub for all platforms Rohan McLure
  2022-10-24 0:35 ` [PATCH v3 2/3] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers Rohan McLure
  2022-10-24 0:35 ` [PATCH v3 3/3] powerpc: mm: support page table check Rohan McLure
@ 2022-11-03 7:56 ` Christophe Leroy
  2 siblings, 0 replies; 9+ messages in thread
From: Christophe Leroy @ 2022-11-03 7:56 UTC (permalink / raw)
To: Rohan McLure, linuxppc-dev@lists.ozlabs.org

Le 24/10/2022 à 02:35, Rohan McLure a écrit :
> [...]
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index 33f4bf8d22b0..36956fb440e1 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -158,6 +158,20 @@ struct seq_file;
>   void arch_report_meminfo(struct seq_file *m);
>   #endif /* CONFIG_PPC64 */
>
> +/*
> + * Currently only consumed by page_table_check_pud_{set,clear}. Since clears
> + * and sets to page table entries at any level are done through
> + * page_table_check_pte_{set,clear}, provide stub implementation.
> + */
> +#ifndef pud_pfn
> +#define pud_pfn pud_pfn
> +static inline int pud_pfn(pud_t pud)
> +{
> +	WARN(1, "pud: platform does not use pud entries directly");

Would a WARN_ONCE() be enough ?

> +	return 0;
> +}
> +#endif
> +
>   #endif /* __ASSEMBLY__ */
>
>   #endif /* _ASM_POWERPC_PGTABLE_H */

^ permalink raw reply	[flat|nested] 9+ messages in thread
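
For reference, the WARN_ONCE() variant suggested above would read
(sketch):

	static inline int pud_pfn(pud_t pud)
	{
		WARN_ONCE(1, "pud: platform does not use pud entries directly");
		return 0;
	}

limiting the report and backtrace to the first offending call rather
than flooding the log on every use.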