* [PATCH v13 01/11] mm/page_table_check: Reinstate address parameter in [__]page_table_check_pud_set()
From: Andrew Donnellan @ 2025-02-11 16:13 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
This reverts commit 6d144436d954 ("mm/page_table_check: remove unused
parameter in [__]page_table_check_pud_set").
Reinstate previously unused parameters for the purpose of supporting
powerpc platforms, as many do not encode user/kernel ownership of the
page in the pte, but instead in the address of the access.
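For illustration (the real helpers are added later in this series, in patch 09): on such platforms the user/kernel classification has to come from the virtual address rather than from a pte bit. A minimal sketch, assuming powerpc's existing is_kernel_addr() address-space test; the helper name below is hypothetical:

static inline bool pte_maps_user_address(pte_t pte, unsigned long addr)
{
        /* No user bit in the pte: decide purely by which half of the
         * address space the access targets. */
        return pte_present(pte) && !is_kernel_addr(addr);
}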
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[ajd: rebase on riscv changes, remove riscv commit message comment]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
v13: remove inaccurate comment on riscv in the commit message
---
arch/arm64/include/asm/pgtable.h | 2 +-
arch/riscv/include/asm/pgtable.h | 2 +-
arch/x86/include/asm/pgtable.h | 2 +-
include/linux/page_table_check.h | 11 +++++++----
mm/page_table_check.c | 3 ++-
5 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0b2a2ad1b9e8..f77bbaf3cf7c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -661,7 +661,7 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- page_table_check_pud_set(mm, pudp, pud);
+ page_table_check_pud_set(mm, addr, pudp, pud);
return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
PUD_SIZE >> PAGE_SHIFT);
}
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 050fdc49b5ad..1a9f1091bd5c 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -778,7 +778,7 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- page_table_check_pud_set(mm, pudp, pud);
+ page_table_check_pud_set(mm, addr, pudp, pud);
return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 593f10aabd45..e53674bb3814 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1287,7 +1287,7 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- page_table_check_pud_set(mm, pudp, pud);
+ page_table_check_pud_set(mm, addr, pudp, pud);
native_set_pud(pudp, pud);
}
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 6722941c7cb8..d188428512f5 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -20,7 +20,8 @@ void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
unsigned int nr);
void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
-void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
+void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
@@ -83,13 +84,14 @@ static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
__page_table_check_pmd_set(mm, pmdp, pmd);
}
-static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_set(mm, pudp, pud);
+ __page_table_check_pud_set(mm, addr, pudp, pud);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -134,7 +136,8 @@ static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
{
}
-static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
pud_t pud)
{
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 509c6ef8de40..f14fef81c61d 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -249,7 +249,8 @@ void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
}
EXPORT_SYMBOL(__page_table_check_pmd_set);
-void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
+void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
{
if (&init_mm == mm)
return;
--
2.48.1
* [PATCH v13 02/11] mm/page_table_check: Reinstate address parameter in [__]page_table_check_pmd_set()
From: Andrew Donnellan @ 2025-02-11 16:13 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
This reverts commit a3b837130b58 ("mm/page_table_check: remove unused
parameter in [__]page_table_check_pmd_set").
Reinstate previously unused parameters for the purpose of supporting
powerpc platforms, as many do not encode user/kernel ownership of the
page in the pte, but instead in the address of the access.
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[ajd: rebase on riscv and mm/page_table_check.c changes, remove riscv
commit message comment]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
v13: remove inaccurate comment on riscv in the commit message
---
arch/arm64/include/asm/pgtable.h | 4 ++--
arch/riscv/include/asm/pgtable.h | 4 ++--
arch/x86/include/asm/pgtable.h | 4 ++--
include/linux/page_table_check.h | 11 +++++++----
mm/page_table_check.c | 3 ++-
5 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f77bbaf3cf7c..0262d9e966f9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -653,7 +653,7 @@ static inline void __set_pte_at(struct mm_struct *mm,
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- page_table_check_pmd_set(mm, pmdp, pmd);
+ page_table_check_pmd_set(mm, addr, pmdp, pmd);
return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
PMD_SIZE >> PAGE_SHIFT);
}
@@ -1415,7 +1415,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
- page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+ page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 1a9f1091bd5c..bd4092b72258 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -771,7 +771,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- page_table_check_pmd_set(mm, pmdp, pmd);
+ page_table_check_pmd_set(mm, addr, pmdp, pmd);
return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}
@@ -842,7 +842,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
- page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+ page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e53674bb3814..366cdb9d75d9 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1280,7 +1280,7 @@ static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- page_table_check_pmd_set(mm, pmdp, pmd);
+ page_table_check_pmd_set(mm, addr, pmdp, pmd);
set_pmd(pmdp, pmd);
}
@@ -1425,7 +1425,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
- page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+ page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
if (IS_ENABLED(CONFIG_SMP)) {
return xchg(pmdp, pmd);
} else {
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index d188428512f5..5855d690c48a 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -19,7 +19,8 @@ void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
unsigned int nr);
-void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
+void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -75,13 +76,14 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
__page_table_check_ptes_set(mm, ptep, pte, nr);
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_set(mm, pmdp, pmd);
+ __page_table_check_pmd_set(mm, addr, pmdp, pmd);
}
static inline void page_table_check_pud_set(struct mm_struct *mm,
@@ -131,7 +133,8 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
{
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
pmd_t pmd)
{
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index f14fef81c61d..099719d6f788 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -234,7 +234,8 @@ static inline void page_table_check_pmd_flags(pmd_t pmd)
WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
}
-void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
+void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
{
if (&init_mm == mm)
return;
--
2.48.1
* [PATCH v13 03/11] mm/page_table_check: Provide addr parameter to page_table_check_pte_set()
From: Andrew Donnellan @ 2025-02-11 16:13 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
To provide support for powerpc platforms, provide an addr parameter to
the page_table_check_pte_set() routine. This parameter is needed on some
powerpc platforms which do not encode whether a mapping is for user or
kernel in the pte. On such platforms, this can be inferred form the
addr parameter.
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[ajd: rebase]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
arch/arm64/include/asm/pgtable.h | 2 +-
arch/riscv/include/asm/pgtable.h | 2 +-
include/linux/page_table_check.h | 12 +++++++-----
include/linux/pgtable.h | 2 +-
mm/page_table_check.c | 4 ++--
5 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0262d9e966f9..78f579812c0c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -424,7 +424,7 @@ static inline void __set_ptes(struct mm_struct *mm,
unsigned long __always_unused addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
- page_table_check_ptes_set(mm, ptep, pte, nr);
+ page_table_check_ptes_set(mm, addr, ptep, pte, nr);
__sync_cache_and_tags(pte, nr);
for (;;) {
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index bd4092b72258..9eb5f85dac82 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -562,7 +562,7 @@ static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval, unsigned int nr)
{
- page_table_check_ptes_set(mm, ptep, pteval, nr);
+ page_table_check_ptes_set(mm, addr, ptep, pteval, nr);
for (;;) {
__set_pte_at(mm, ptep, pteval);
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 5855d690c48a..9243c920ed02 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -17,8 +17,8 @@ void __page_table_check_zero(struct page *page, unsigned int order);
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
-void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
- unsigned int nr);
+void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr);
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
@@ -68,12 +68,13 @@ static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
}
static inline void page_table_check_ptes_set(struct mm_struct *mm,
- pte_t *ptep, pte_t pte, unsigned int nr)
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_ptes_set(mm, ptep, pte, nr);
+ __page_table_check_ptes_set(mm, addr, ptep, pte, nr);
}
static inline void page_table_check_pmd_set(struct mm_struct *mm,
@@ -129,7 +130,8 @@ static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
}
static inline void page_table_check_ptes_set(struct mm_struct *mm,
- pte_t *ptep, pte_t pte, unsigned int nr)
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned int nr)
{
}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 94d267d02372..2b25dd17ec67 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -285,7 +285,7 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
- page_table_check_ptes_set(mm, ptep, pte, nr);
+ page_table_check_ptes_set(mm, addr, ptep, pte, nr);
arch_enter_lazy_mmu_mode();
for (;;) {
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 099719d6f788..959e11e1af24 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -209,8 +209,8 @@ static inline void page_table_check_pte_flags(pte_t pte)
WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
}
-void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
- unsigned int nr)
+void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
unsigned int i;
--
2.48.1
* [PATCH v13 04/11] mm/page_table_check: Reinstate address parameter in [__]page_table_check_pud_clear()
From: Andrew Donnellan @ 2025-02-11 16:13 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
This reverts commit 931c38e16499 ("mm/page_table_check: remove unused
parameter in [__]page_table_check_pud_clear").
Reinstate previously unused parameters for the purpose of supporting
powerpc platforms, as many do not encode user/kernel ownership of the
page in the pte, but instead in the address of the access.
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
arch/x86/include/asm/pgtable.h | 2 +-
include/linux/page_table_check.h | 11 +++++++----
include/linux/pgtable.h | 2 +-
mm/page_table_check.c | 5 +++--
4 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 366cdb9d75d9..5ec052ef07ff 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1398,7 +1398,7 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
{
pud_t pud = native_pudp_get_and_clear(pudp);
- page_table_check_pud_clear(mm, pud);
+ page_table_check_pud_clear(mm, addr, pud);
return pud;
}
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 9243c920ed02..d01a00ffc1f9 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -16,7 +16,8 @@ extern struct page_ext_operations page_table_check_ops;
void __page_table_check_zero(struct page *page, unsigned int order);
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
-void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
+void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
+ pud_t pud);
void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr);
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
@@ -59,12 +60,13 @@ static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
__page_table_check_pmd_clear(mm, pmd);
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_clear(mm, pud);
+ __page_table_check_pud_clear(mm, addr, pud);
}
static inline void page_table_check_ptes_set(struct mm_struct *mm,
@@ -125,7 +127,8 @@ static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
{
}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2b25dd17ec67..e41cf0622aa4 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -648,7 +648,7 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
- page_table_check_pud_clear(mm, pud);
+ page_table_check_pud_clear(mm, address, pud);
return pud;
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 959e11e1af24..7c2f3f93e377 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -182,7 +182,8 @@ void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);
-void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
+ pud_t pud)
{
if (&init_mm == mm)
return;
@@ -256,7 +257,7 @@ void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
if (&init_mm == mm)
return;
- __page_table_check_pud_clear(mm, *pudp);
+ __page_table_check_pud_clear(mm, addr, *pudp);
if (pud_user_accessible_page(pud)) {
page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
pud_write(pud));
--
2.48.1
* Re: [PATCH v13 04/11] mm/page_table_check: Reinstate address parameter in [__]page_table_check_pud_clear()
From: Andrew Morton @ 2025-03-13 23:10 UTC (permalink / raw)
To: Andrew Donnellan
Cc: linuxppc-dev, x86, linux-mm, linux-riscv, linux-arm-kernel,
linux-kernel, pasha.tatashin, sweettea-kernel
On Wed, 12 Feb 2025 03:13:57 +1100 Andrew Donnellan <ajd@linux.ibm.com> wrote:
> This reverts commit 931c38e16499 ("mm/page_table_check: remove unused
> parameter in [__]page_table_check_pud_clear").
>
> Reinstate previously unused parameters for the purpose of supporting
> powerpc platforms, as many do not encode user/kernel ownership of the
> page in the pte, but instead in the address of the access.
My x86-64 allmodconfig exploded.
./arch/x86/include/asm/pgtable.h: In function 'pudp_establish':
./arch/x86/include/asm/pgtable.h:1443:46: error: passing argument 2 of 'page_table_check_pud_set' makes integer from pointer without a cast [-Werror=int-conversion]
1443 | page_table_check_pud_set(vma->vm_mm, pudp, pud);
| ^~~~
| |
| pud_t *
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline pud_t pudp_establish(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp, pud_t pud)
{
page_table_check_pud_set(vma->vm_mm, pudp, pud);
...
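The failing line is the one pudp_establish() caller that patch 01 did not convert; the fix is presumably just to thread the address through, as the other x86 callers now do. A sketch of that shape (not necessarily the actual v14 change), with the rest of the function elided as in the quote above:

static inline pud_t pudp_establish(struct vm_area_struct *vma,
                unsigned long address, pud_t *pudp, pud_t pud)
{
        page_table_check_pud_set(vma->vm_mm, address, pudp, pud);
        /* ... remainder of pudp_establish() unchanged ... */
}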
* Re: [PATCH v13 04/11] mm/page_table_check: Reinstate address parameter in [__]page_table_check_pud_clear()
From: Andrew Donnellan @ 2025-04-11 5:52 UTC (permalink / raw)
To: Andrew Morton
Cc: linuxppc-dev, x86, linux-mm, linux-riscv, linux-arm-kernel,
linux-kernel, pasha.tatashin, sweettea-kernel
On Thu, 2025-03-13 at 16:10 -0700, Andrew Morton wrote:
> On Wed, 12 Feb 2025 03:13:57 +1100 Andrew Donnellan
> <ajd@linux.ibm.com> wrote:
>
> > This reverts commit 931c38e16499 ("mm/page_table_check: remove
> > unused
> > parameter in [__]page_table_check_pud_clear").
> >
> > Reinstate previously unused parameters for the purpose of
> > supporting
> > powerpc platforms, as many do not encode user/kernel ownership of
> > the
> > page in the pte, but instead in the address of the access.
>
> My x86-64 allmodconfig exploded.
>
> ./arch/x86/include/asm/pgtable.h: In function 'pudp_establish':
> ./arch/x86/include/asm/pgtable.h:1443:46: error: passing argument 2
> of 'page_table_check_pud_set' makes integer from pointer without a
> cast [-Werror=int-conversion]
> 1443 | page_table_check_pud_set(vma->vm_mm, pudp, pud);
> | ^~~~
> | |
> | pud_t *
>
>
>
> #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
> static inline pud_t pudp_establish(struct vm_area_struct *vma,
> unsigned long address, pud_t *pudp, pud_t pud)
> {
> page_table_check_pud_set(vma->vm_mm, pudp, pud);
> ...
>
Sorry, my email filters sent this to the wrong folder for some reason
and I didn't see this. I've sent v14:
https://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=452247
Thanks,
--
Andrew Donnellan OzLabs, ADL Canberra
ajd@linux.ibm.com IBM Australia Limited
* [PATCH v13 05/11] mm/page_table_check: Reinstate address parameter in [__]page_table_check_pmd_clear()
From: Andrew Donnellan @ 2025-02-11 16:13 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
This reverts commit 1831414cd729 ("mm/page_table_check: remove unused
parameter in [__]page_table_check_pmd_clear").
Reinstate previously unused parameters for the purpose of supporting
powerpc platforms, as many do not encode user/kernel ownership of the
page in the pte, but instead in the address of the access.
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
arch/arm64/include/asm/pgtable.h | 2 +-
arch/riscv/include/asm/pgtable.h | 2 +-
arch/x86/include/asm/pgtable.h | 2 +-
include/linux/page_table_check.h | 11 +++++++----
include/linux/pgtable.h | 2 +-
mm/page_table_check.c | 5 +++--
6 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 78f579812c0c..58108086f962 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1324,7 +1324,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
{
pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
- page_table_check_pmd_clear(mm, pmd);
+ page_table_check_pmd_clear(mm, address, pmd);
return pmd;
}
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9eb5f85dac82..8cb0d5e2ee47 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -826,7 +826,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
{
pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
- page_table_check_pmd_clear(mm, pmd);
+ page_table_check_pmd_clear(mm, address, pmd);
return pmd;
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5ec052ef07ff..9027467bbde7 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1387,7 +1387,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long
{
pmd_t pmd = native_pmdp_get_and_clear(pmdp);
- page_table_check_pmd_clear(mm, pmd);
+ page_table_check_pmd_clear(mm, addr, pmd);
return pmd;
}
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index d01a00ffc1f9..0a6ebfa46a31 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -15,7 +15,8 @@ extern struct page_ext_operations page_table_check_ops;
void __page_table_check_zero(struct page *page, unsigned int order);
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
-void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
+void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
pud_t pud);
void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
@@ -52,12 +53,13 @@ static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
__page_table_check_pte_clear(mm, pte);
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_clear(mm, pmd);
+ __page_table_check_pmd_clear(mm, addr, pmd);
}
static inline void page_table_check_pud_clear(struct mm_struct *mm,
@@ -123,7 +125,8 @@ static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
{
}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e41cf0622aa4..d34d0ec2d676 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -635,7 +635,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
- page_table_check_pmd_clear(mm, pmd);
+ page_table_check_pmd_clear(mm, address, pmd);
return pmd;
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 7c2f3f93e377..12781847bec7 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -171,7 +171,8 @@ void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
}
EXPORT_SYMBOL(__page_table_check_pte_clear);
-void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t pmd)
{
if (&init_mm == mm)
return;
@@ -243,7 +244,7 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
page_table_check_pmd_flags(pmd);
- __page_table_check_pmd_clear(mm, *pmdp);
+ __page_table_check_pmd_clear(mm, addr, *pmdp);
if (pmd_user_accessible_page(pmd)) {
page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
pmd_write(pmd));
--
2.48.1
* [PATCH v13 06/11] mm/page_table_check: Reinstate address parameter in [__]page_table_check_pte_clear()
From: Andrew Donnellan @ 2025-02-11 16:13 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
This reverts commit aa232204c468 ("mm/page_table_check: remove unused
parameter in [__]page_table_check_pte_clear").
Reinstate previously unused parameters for the purpose of supporting
powerpc platforms, as many do not encode user/kernel ownership of the
page in the pte, but instead in the address of the access.
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[ajd: rebase and fix additional occurrence]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
v13: fix an additional occurrence
---
arch/arm64/include/asm/pgtable.h | 2 +-
arch/riscv/include/asm/pgtable.h | 2 +-
arch/x86/include/asm/pgtable.h | 4 ++--
include/linux/page_table_check.h | 11 +++++++----
include/linux/pgtable.h | 4 ++--
mm/page_table_check.c | 7 ++++---
6 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 58108086f962..f9effb5ddf1a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1281,7 +1281,7 @@ static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, address, pte);
return pte;
}
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 8cb0d5e2ee47..c8d59feb4963 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -593,7 +593,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, address, pte);
return pte;
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 9027467bbde7..dbb376400ac7 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1318,7 +1318,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t pte = native_ptep_get_and_clear(ptep);
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, addr, pte);
return pte;
}
@@ -1334,7 +1334,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
* care about updates and native needs no locking
*/
pte = native_local_ptep_get_and_clear(ptep);
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, addr, pte);
} else {
pte = ptep_get_and_clear(mm, addr, ptep);
}
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 0a6ebfa46a31..48721a4a2b84 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -14,7 +14,8 @@ extern struct static_key_true page_table_check_disabled;
extern struct page_ext_operations page_table_check_ops;
void __page_table_check_zero(struct page *page, unsigned int order);
-void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
+void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t pte);
void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
@@ -45,12 +46,13 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
__page_table_check_zero(page, order);
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pte_clear(mm, pte);
+ __page_table_check_pte_clear(mm, addr, pte);
}
static inline void page_table_check_pmd_clear(struct mm_struct *mm,
@@ -121,7 +123,8 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
{
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
{
}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index d34d0ec2d676..111c507c2c53 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -481,7 +481,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, address, pte);
return pte;
}
#endif
@@ -540,7 +540,7 @@ static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
* No need for ptep_get_and_clear(): page table check doesn't care about
* any bits that could have been set by HW concurrently.
*/
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, addr, pte);
}
#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 12781847bec7..4cb3e9ae57ff 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -160,7 +160,8 @@ void __page_table_check_zero(struct page *page, unsigned int order)
page_ext_put(page_ext);
}
-void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t pte)
{
if (&init_mm == mm)
return;
@@ -222,7 +223,7 @@ void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
page_table_check_pte_flags(pte);
for (i = 0; i < nr; i++)
- __page_table_check_pte_clear(mm, ptep_get(ptep + i));
+ __page_table_check_pte_clear(mm, addr, ptep_get(ptep + i));
if (pte_user_accessible_page(pte))
page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
@@ -280,7 +281,7 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm,
if (WARN_ON(!ptep))
return;
for (i = 0; i < PTRS_PER_PTE; i++) {
- __page_table_check_pte_clear(mm, ptep_get(ptep));
+ __page_table_check_pte_clear(mm, addr, ptep_get(ptep));
addr += PAGE_SIZE;
ptep++;
}
--
2.48.1
* [PATCH v13 07/11] mm: Provide address parameter to p{te,md,ud}_user_accessible_page()
From: Andrew Donnellan @ 2025-02-11 16:14 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
On several powerpc platforms, a page table entry may not imply whether
the relevant mapping is for userspace or kernelspace. Instead, such
platforms infer this from the address being accessed.
Add an additional address argument to each of these routines in order to
provide support for page table check on powerpc.
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[ajd: rebase on arm64 changes]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
arch/arm64/include/asm/pgtable.h | 6 +++---
arch/riscv/include/asm/pgtable.h | 6 +++---
arch/x86/include/asm/pgtable.h | 6 +++---
mm/page_table_check.c | 12 ++++++------
4 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f9effb5ddf1a..2079ff937627 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1208,17 +1208,17 @@ static inline int pgd_devmap(pgd_t pgd)
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
-static inline bool pte_user_accessible_page(pte_t pte)
+static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}
-static inline bool pmd_user_accessible_page(pmd_t pmd)
+static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}
-static inline bool pud_user_accessible_page(pud_t pud)
+static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index c8d59feb4963..a43813a37fa3 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -783,17 +783,17 @@ static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
}
#ifdef CONFIG_PAGE_TABLE_CHECK
-static inline bool pte_user_accessible_page(pte_t pte)
+static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
return pte_present(pte) && pte_user(pte);
}
-static inline bool pmd_user_accessible_page(pmd_t pmd)
+static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
return pmd_leaf(pmd) && pmd_user(pmd);
}
-static inline bool pud_user_accessible_page(pud_t pud)
+static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
return pud_leaf(pud) && pud_user(pud);
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index dbb376400ac7..1624b7dc9b25 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1751,17 +1751,17 @@ static inline bool arch_has_hw_nonleaf_pmd_young(void)
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
-static inline bool pte_user_accessible_page(pte_t pte)
+static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}
-static inline bool pmd_user_accessible_page(pmd_t pmd)
+static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
}
-static inline bool pud_user_accessible_page(pud_t pud)
+static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 4cb3e9ae57ff..99066fe6a127 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -166,7 +166,7 @@ void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
if (&init_mm == mm)
return;
- if (pte_user_accessible_page(pte)) {
+ if (pte_user_accessible_page(pte, addr)) {
page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
}
}
@@ -178,7 +178,7 @@ void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
if (&init_mm == mm)
return;
- if (pmd_user_accessible_page(pmd)) {
+ if (pmd_user_accessible_page(pmd, addr)) {
page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
}
}
@@ -190,7 +190,7 @@ void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
if (&init_mm == mm)
return;
- if (pud_user_accessible_page(pud)) {
+ if (pud_user_accessible_page(pud, addr)) {
page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
}
}
@@ -224,7 +224,7 @@ void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
for (i = 0; i < nr; i++)
__page_table_check_pte_clear(mm, addr, ptep_get(ptep + i));
- if (pte_user_accessible_page(pte))
+ if (pte_user_accessible_page(pte, addr))
page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);
@@ -246,7 +246,7 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
page_table_check_pmd_flags(pmd);
__page_table_check_pmd_clear(mm, addr, *pmdp);
- if (pmd_user_accessible_page(pmd)) {
+ if (pmd_user_accessible_page(pmd, addr)) {
page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
pmd_write(pmd));
}
@@ -260,7 +260,7 @@ void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
return;
__page_table_check_pud_clear(mm, addr, *pudp);
- if (pud_user_accessible_page(pud)) {
+ if (pud_user_accessible_page(pud, addr)) {
page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
pud_write(pud));
}
--
2.48.1
* [PATCH v13 08/11] powerpc: mm: Add pud_pfn() stub
From: Andrew Donnellan @ 2025-02-11 16:14 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
The page table check feature requires that pud_pfn() be defined
on each consuming architecture. Since only 64-bit, Book3S platforms
allow for hugepages at this upper level, and since the calling code is
gated by a call to pud_user_accessible_page(), which will return zero,
include this stub as a BUILD_BUG().
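For context, the only caller of pud_pfn() in the checker is gated as in the condensed sketch below (the real code is in mm/page_table_check.c, visible in the patch 04 and 07 diffs). Where huge PUDs do not exist, pud_user_accessible_page() is the constant-false fallback added in patch 09, so the branch is dead code and the BUILD_BUG() in the stub is never expanded:

if (pud_user_accessible_page(pud, addr)) {      /* constant false without huge PUD support */
        page_table_check_set(pud_pfn(pud),      /* dead code there, so BUILD_BUG() never trips */
                             PUD_SIZE >> PAGE_SHIFT, pud_write(pud));
}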
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
arch/powerpc/include/asm/pgtable.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2f72ad885332..bb43e4f46367 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -215,6 +215,15 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
#endif /* CONFIG_PPC64 */
+#ifndef pud_pfn
+#define pud_pfn pud_pfn
+static inline int pud_pfn(pud_t pud)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
--
2.48.1
* [PATCH v13 09/11] powerpc: mm: Implement *_user_accessible_page() for ptes
From: Andrew Donnellan @ 2025-02-11 16:14 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
Page table checking depends on architectures providing an
implementation of p{te,md,ud}_user_accessible_page. With
refactorisations made on powerpc/mm, the pte_access_permitted() and
similar methods verify whether a userland page is accessible with the
required permissions.
Since page table checking is the only user of
p{te,md,ud}_user_accessible_page(), implement these for all platforms,
using some of the same preliminary checks taken by pte_access_permitted()
on that platform.
Since commit 8e9bd41e4ce1 ("powerpc/nohash: Replace pte_user() by pte_read()")
pte_user() is no longer required to be present on all platforms as it
may be equivalent to or implied by pte_read(). Hence implementations of
pte_user_accessible_page() are specialised.
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[ajd: rebase and fix commit message]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
arch/powerpc/include/asm/book3s/32/pgtable.h | 5 +++++
arch/powerpc/include/asm/book3s/64/pgtable.h | 17 +++++++++++++++++
arch/powerpc/include/asm/nohash/pgtable.h | 5 +++++
arch/powerpc/include/asm/pgtable.h | 8 ++++++++
4 files changed, 35 insertions(+)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 42c3af90d1f0..a2305d850fc9 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -437,6 +437,11 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
return true;
}
+static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
+{
+ return pte_present(pte) && !is_kernel_addr(addr);
+}
+
/* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6d98e6f08d4d..754d4d525f0e 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -540,6 +540,11 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
return arch_pte_access_permitted(pte_val(pte), write, 0);
}
+static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
+{
+ return pte_present(pte) && pte_user(pte);
+}
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -1431,5 +1436,17 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
return false;
}
+#define pmd_user_accessible_page pmd_user_accessible_page
+static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
+{
+ return pmd_leaf(pmd) && pte_user_accessible_page(pmd_pte(pmd), addr);
+}
+
+#define pud_user_accessible_page pud_user_accessible_page
+static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
+{
+ return pud_leaf(pud) && pte_user_accessible_page(pud_pte(pud), addr);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 8d1f0b7062eb..1c3dfe2d6cc1 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -243,6 +243,11 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
return true;
}
+static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
+{
+ return pte_present(pte) && !is_kernel_addr(addr);
+}
+
/* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index bb43e4f46367..3cae32c74fed 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -224,6 +224,14 @@ static inline int pud_pfn(pud_t pud)
}
#endif
+#ifndef pmd_user_accessible_page
+#define pmd_user_accessible_page(pmd, addr) false
+#endif
+
+#ifndef pud_user_accessible_page
+#define pud_user_accessible_page(pud, addr) false
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
--
2.48.1
* [PATCH v13 10/11] powerpc: mm: Use set_pte_at_unchecked() for internal usages
From: Andrew Donnellan @ 2025-02-11 16:14 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
From: Rohan McLure <rmclure@linux.ibm.com>
In the new set_ptes() API, set_pte_at() (a special case of set_ptes())
is intended to be instrumented by the page table check facility. There
are however several other routines that constitute the API for setting
page table entries, including set_pmd_at() among others. Such routines
are themselves implemented in terms of set_pte_at().
A future patch providing support for page table checking on powerpc
must take care to avoid duplicate calls to
page_table_check_p{te,md,ud}_set(). Allow for assignment of pte entries
without instrumentation through the set_pte_at_unchecked() routine
introduced in this patch.
Cause API-facing routines that call set_pte_at() to instead call
set_pte_at_unchecked(), which will remain uninstrumented by page
table check. set_ptes() is itself implemented by calls to
__set_pte_at(), so this eliminates redundant code.
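A sketch of the double accounting this avoids (illustrative only, not the literal powerpc diff): once the final patch instruments set_pmd_at() itself, the inner pte write must not be instrumented a second time.

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        page_table_check_pmd_set(mm, addr, pmdp, pmd); /* counted once, here */
        /* set_pte_at() would run page_table_check_ptes_set() on the same
         * entry again; the unchecked variant only applies set_pte_filter()
         * and __set_pte_at(), with no check hooks. */
        set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}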
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
[ajd: don't change to unchecked for early boot/kernel mappings]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
v13: don't use the unchecked version for early-boot kernel mappings (Pasha)
---
arch/powerpc/include/asm/pgtable.h | 2 ++
arch/powerpc/mm/book3s64/pgtable.c | 6 +++---
arch/powerpc/mm/book3s64/radix_pgtable.c | 6 +++---
arch/powerpc/mm/pgtable.c | 8 ++++++++
4 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 3cae32c74fed..221cae4873b9 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -46,6 +46,8 @@ struct mm_struct;
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte, unsigned int nr);
#define set_ptes set_ptes
+void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index ce64abea9e3e..b4708b2cabba 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -129,7 +129,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(!(pmd_leaf(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
- return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
+ return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
void set_pud_at(struct mm_struct *mm, unsigned long addr,
@@ -146,7 +146,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(!(pud_leaf(pud)));
#endif
trace_hugepage_set_pud(addr, pud_val(pud));
- return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
+ return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}
static void do_serialize(void *arg)
@@ -557,7 +557,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
if (radix_enabled())
return radix__ptep_modify_prot_commit(vma, addr,
ptep, old_pte, pte);
- set_pte_at(vma->vm_mm, addr, ptep, pte);
+ set_pte_at_unchecked(vma->vm_mm, addr, ptep, pte);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 311e2112d782..1704381f5c3c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1586,7 +1586,7 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
(atomic_read(&mm->context.copros) > 0))
radix__flush_tlb_page(vma, addr);
- set_pte_at(mm, addr, ptep, pte);
+ set_pte_at_unchecked(mm, addr, ptep, pte);
}
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
@@ -1597,7 +1597,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
if (!radix_enabled())
return 0;
- set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
+ set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pud);
return 1;
}
@@ -1644,7 +1644,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
if (!radix_enabled())
return 0;
- set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
+ set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pmd);
return 1;
}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 61df5aed7989..4cc9af7961ca 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -224,6 +224,14 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
}
}
+void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+ pte = set_pte_filter(pte, addr);
+ __set_pte_at(mm, addr, ptep, pte, 0);
+}
+
void unmap_kernel_page(unsigned long va)
{
pmd_t *pmdp = pmd_off_k(va);
--
2.48.1
* [PATCH v13 11/11] powerpc: mm: Support page table check
2025-02-11 16:13 [PATCH v13 00/11] Support page table check on PowerPC Andrew Donnellan
` (9 preceding siblings ...)
2025-02-11 16:14 ` [PATCH v13 10/11] powerpc: mm: Use set_pte_at_unchecked() for internal usages Andrew Donnellan
@ 2025-02-11 16:14 ` Andrew Donnellan
2025-03-13 2:54 ` [PATCH v13 00/11] Support page table check on PowerPC Andrew Donnellan
2025-03-18 5:38 ` Madhavan Srinivasan
12 siblings, 0 replies; 17+ messages in thread
From: Andrew Donnellan @ 2025-02-11 16:14 UTC (permalink / raw)
To: linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel, Christophe Leroy
From: Rohan McLure <rmclure@linux.ibm.com>
On creation and clearing of a page table mapping, instrument the
operation by invoking page_table_check_pte_set and
page_table_check_pte_clear respectively. These calls serve as a sanity
check against illegal mappings.
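As a sketch of the pattern (illustrative only, the helper name below is
made up; the real changes are in the hunks that follow): clear-side
helpers capture the old entry and pass it, together with the mm and
address, to the checker, while set-side helpers report the new entry
before it is installed.
/* Illustrative sketch only; the helper name is made up for this example. */
#include <linux/page_table_check.h>
static inline pte_t example_ptep_get_and_clear(struct mm_struct *mm,
					       unsigned long addr, pte_t *ptep)
{
	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
	/*
	 * Report the entry being torn down, with the mm and address, so the
	 * checker can drop its accounting for the backing page.
	 */
	page_table_check_pte_clear(mm, addr, old_pte);
	return old_pte;
}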
Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all powerpc platforms.
See also:
riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
arm64 in commit 42b2547137f5 ("arm64/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
check")
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[ajd: rebase]
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/book3s/32/pgtable.h | 7 ++-
arch/powerpc/include/asm/book3s/64/pgtable.h | 45 +++++++++++++++-----
arch/powerpc/include/asm/nohash/pgtable.h | 8 +++-
arch/powerpc/mm/book3s64/hash_pgtable.c | 4 ++
arch/powerpc/mm/book3s64/pgtable.c | 11 +++--
arch/powerpc/mm/book3s64/radix_pgtable.c | 3 ++
arch/powerpc/mm/pgtable.c | 4 ++
8 files changed, 68 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 424f188e62d9..a990af9c17aa 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -171,6 +171,7 @@ config PPC
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index a2305d850fc9..ad7febf75471 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -202,6 +202,7 @@ void unmap_kernel_page(unsigned long va);
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
+#include <linux/page_table_check.h>
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0
@@ -315,7 +316,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+ pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 754d4d525f0e..da07d604c275 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -145,6 +145,8 @@
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
#ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
/*
* page table defines
*/
@@ -417,8 +419,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
- return __pte(old);
+ pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
@@ -427,11 +432,16 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
pte_t *ptep, int full)
{
if (full && radix_enabled()) {
+ pte_t old_pte;
+
/*
* We know that this is a full mm pte clear and
* hence can be sure there is no parallel set_pte.
*/
- return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+ old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
return ptep_get_and_clear(mm, addr, ptep);
}
@@ -1309,19 +1319,34 @@ extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- if (radix_enabled())
- return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
- return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
+ pmd_t old_pmd;
+
+ if (radix_enabled()) {
+ old_pmd = radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
+ } else {
+ old_pmd = hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
+ }
+
+ page_table_check_pmd_clear(mm, addr, old_pmd);
+
+ return old_pmd;
}
#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pud_t *pudp)
{
- if (radix_enabled())
- return radix__pudp_huge_get_and_clear(mm, addr, pudp);
- BUG();
- return *pudp;
+ pud_t old_pud;
+
+ if (radix_enabled()) {
+ old_pud = radix__pudp_huge_get_and_clear(mm, addr, pudp);
+ } else {
+ BUG();
+ }
+
+ page_table_check_pud_clear(mm, addr, old_pud);
+
+ return old_pud;
}
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1c3dfe2d6cc1..36c79f39f47d 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -29,6 +29,8 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
#ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
extern int icache_44x_need_flush;
#ifndef pte_huge_size
@@ -122,7 +124,11 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+ pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 988948d69bc1..c1c25d46dd16 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
+#include <linux/page_table_check.h>
#include <linux/stop_machine.h>
#include <asm/sections.h>
@@ -231,6 +232,9 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
pmd = *pmdp;
pmd_clear(pmdp);
+
+ page_table_check_pmd_clear(vma->vm_mm, address, pmd);
+
/*
* Wait for all pending hash_page to finish. This is needed
* in case of subpage collapse. When we collapse normal pages
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index b4708b2cabba..3702d5bc7a5a 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -10,6 +10,7 @@
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
+#include <linux/page_table_check.h>
#include <misc/cxl-base.h>
#include <asm/pgalloc.h>
@@ -129,6 +130,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(!(pmd_leaf(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
+ page_table_check_pmd_set(mm, addr, pmdp, pmd);
return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
@@ -146,6 +148,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(!(pud_leaf(pud)));
#endif
trace_hugepage_set_pud(addr, pud_val(pud));
+ page_table_check_pud_set(mm, addr, pudp, pud);
return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}
@@ -181,12 +184,14 @@ void serialize_against_pte_lookup(struct mm_struct *mm)
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
- unsigned long old_pmd;
+ pmd_t old_pmd;
VM_WARN_ON_ONCE(!pmd_present(*pmdp));
- old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
+ old_pmd = __pmd(pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID));
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
- return __pmd(old_pmd);
+ page_table_check_pmd_clear(vma->vm_mm, address, old_pmd);
+
+ return old_pmd;
}
pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 1704381f5c3c..cc0dda11a640 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -14,6 +14,7 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
+#include <linux/page_table_check.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>
@@ -1454,6 +1455,8 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
pmd = *pmdp;
pmd_clear(pmdp);
+ page_table_check_pmd_clear(vma->vm_mm, address, pmd);
+
radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
return pmd;
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 4cc9af7961ca..ed46151ae1d9 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/page_table_check.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
@@ -206,6 +207,9 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
* and not hw_valid ptes. Hence there is no translation cache flush
* involved that need to be batched.
*/
+
+ page_table_check_ptes_set(mm, addr, ptep, pte, nr);
+
for (;;) {
/*
--
2.48.1
* Re: [PATCH v13 00/11] Support page table check on PowerPC
2025-02-11 16:13 [PATCH v13 00/11] Support page table check on PowerPC Andrew Donnellan
` (10 preceding siblings ...)
2025-02-11 16:14 ` [PATCH v13 11/11] powerpc: mm: Support page table check Andrew Donnellan
@ 2025-03-13 2:54 ` Andrew Donnellan
2025-03-13 22:47 ` Andrew Morton
2025-03-18 5:38 ` Madhavan Srinivasan
12 siblings, 1 reply; 17+ messages in thread
From: Andrew Donnellan @ 2025-03-13 2:54 UTC (permalink / raw)
To: linuxppc-dev, linux-mm, pasha.tatashin, akpm, maddy
Cc: x86, linux-riscv, linux-arm-kernel, linux-kernel, sweettea-kernel
On Wed, 2025-02-12 at 03:13 +1100, Andrew Donnellan wrote:
> Support page table check on all PowerPC platforms. This works by
> serialising assignments, reassignments and clears of page table
> entries at each level in order to ensure that anonymous mappings
> have at most one writable consumer, and likewise that file-backed
> mappings are not simultaneously also anonymous mappings.
>
> In order to support this infrastructure, a number of stubs must be
> defined for all powerpc platforms. Additionally, separate set_pte_at()
> and set_pte_at_unchecked() are provided, to allow for internal,
> uninstrumented mappings.
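(For anyone skimming, that split boils down to roughly the following
sketch, simplified from the hunks in patches 10 and 11: set_ptes(), and
therefore set_pte_at(), goes through the checker, while
set_pte_at_unchecked() writes the entry without it.)
/* Minimal sketch of the split, simplified from patches 10 and 11. */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
	      pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, addr, ptep, pte, nr);	/* checked */
	/* ... existing loop installing nr entries ... */
}
void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte)
{
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
	pte = set_pte_filter(pte, addr);
	__set_pte_at(mm, addr, ptep, pte, 0);	/* no page table check */
}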
Talking to Maddy about this off-list - given that this series touches
on generic code and several architectures, would it be best to take it
through the mm tree rather than powerpc?
--
Andrew Donnellan OzLabs, ADL Canberra
ajd@linux.ibm.com IBM Australia Limited
* Re: [PATCH v13 00/11] Support page table check on PowerPC
2025-03-13 2:54 ` [PATCH v13 00/11] Support page table check on PowerPC Andrew Donnellan
@ 2025-03-13 22:47 ` Andrew Morton
0 siblings, 0 replies; 17+ messages in thread
From: Andrew Morton @ 2025-03-13 22:47 UTC (permalink / raw)
To: Andrew Donnellan
Cc: linuxppc-dev, linux-mm, pasha.tatashin, maddy, x86, linux-riscv,
linux-arm-kernel, linux-kernel, sweettea-kernel
On Thu, 13 Mar 2025 13:54:39 +1100 Andrew Donnellan <ajd@linux.ibm.com> wrote:
> On Wed, 2025-02-12 at 03:13 +1100, Andrew Donnellan wrote:
> > Support page table check on all PowerPC platforms. This works by
> > serialising assignments, reassignments and clears of page table
> > entries at each level in order to ensure that anonymous mappings
> > have at most one writable consumer, and likewise that file-backed
> > mappings are not simultaneously also anonymous mappings.
> >
> > In order to support this infrastructure, a number of stubs must be
> > defined for all powerpc platforms. Additionally, separate set_pte_at()
> > and set_pte_at_unchecked() are provided, to allow for internal,
> > uninstrumented mappings.
>
> Talking to Maddy about this off-list - given that this series touches
> on generic code and several architectures, would it be best to take it
> through the mm tree rather than powerpc?
Sure, I can do that.
Are the rest of the ppc team OK with these changes? I'm not seeing
much acking from the usual suspects?
* Re: [PATCH v13 00/11] Support page table check on PowerPC
2025-02-11 16:13 [PATCH v13 00/11] Support page table check on PowerPC Andrew Donnellan
` (11 preceding siblings ...)
2025-03-13 2:54 ` [PATCH v13 00/11] Support page table check on PowerPC Andrew Donnellan
@ 2025-03-18 5:38 ` Madhavan Srinivasan
12 siblings, 0 replies; 17+ messages in thread
From: Madhavan Srinivasan @ 2025-03-18 5:38 UTC (permalink / raw)
To: Andrew Donnellan, linuxppc-dev
Cc: x86, linux-mm, linux-riscv, linux-arm-kernel, linux-kernel,
pasha.tatashin, sweettea-kernel
On 2/11/25 9:43 PM, Andrew Donnellan wrote:
> Support page table check on all PowerPC platforms. This works by
> serialising assignments, reassignments and clears of page table
> entries at each level in order to ensure that anonymous mappings
> have at most one writable consumer, and likewise that file-backed
> mappings are not simultaneously also anonymous mappings.
>
> In order to support this infrastructure, a number of stubs must be
> defined for all powerpc platforms. Additionally, separate set_pte_at()
> and set_pte_at_unchecked() are provided, to allow for internal,
> uninstrumented mappings.
>
> (This series was written by Rohan McLure, who has left IBM and is no longer
> working on powerpc - I've taken far too long to pick this up and finally
> send it.)
>
For powerpc changes
Acked-by: Madhavan Srinivasan <maddy@linux.ibm.com>
> v13:
> * Rebase on mainline
> * Don't use set_pte_at_unchecked() for early boot purposes (Pasha)
>
> v12:
> * Rename commits that revert changes to instead reflect that we are
> reinstating old behaviour due to it providing more flexibility
> * Add return line to pud_pfn() stub
> * Instrument ptep_get_and_clear() for nohash
> Link: https://lore.kernel.org/linuxppc-dev/20240402051154.476244-1-rmclure@linux.ibm.com/
>
> v11:
> * The pud_pfn() stub, which previously had no legitimate users on any
> powerpc platform, now has users in Book3s64 with transparent pages.
> Include a stub of the same name for each platform that does not
> define their own.
> * Drop patch that standardised use of p*d_leaf(), as already included
> upstream in v6.9.
> * Provide fallback definitions of p{m,u}d_user_accessible_page() that
> do not reference p*d_leaf(), p*d_pte(), as they are defined after
> powerpc/mm headers by linux/mm headers.
> * Ensure that set_pte_at_unchecked() has the same checks as
> set_pte_at().
> Link: https://lore.kernel.org/linuxppc-dev/20240328045535.194800-14-rmclure@linux.ibm.com/
>
> v10:
> * Revert patches that removed address and mm parameters from page table
> check routines, including consuming code from arm64, x86_64 and
> riscv.
> * Implement *_user_accessible_page() routines in terms of pte_user()
> where available (64-bit, book3s) but otherwise by checking the
> address (on platforms where the pte does not imply whether the
> mapping is for user or kernel)
> * Internal set_pte_at() calls replaced with set_pte_at_unchecked(), which
> is identical, but prevents double instrumentation.
> Link: https://lore.kernel.org/linuxppc-dev/20240313042118.230397-9-rmclure@linux.ibm.com/T/
>
> v9:
> * Adapt to using the set_ptes() API, using __set_pte_at() where we
> must avoid instrumentation.
> * Use the logic of *_access_permitted() for implementing
> *_user_accessible_page(), which are required routines for page table
> check.
> * Even though we no longer need p{m,u,4}d_leaf(), still default
> implement these to assist in refactoring out extant
> p{m,u,4}_is_leaf().
> * Add p{m,u}_pte() stubs where asm-generic does not provide them, as
> page table check wants all *user_accessible_page() variants, and we
> would like to default implement the variants in terms of
> pte_user_accessible_page().
> * Avoid the ugly pmdp_collapse_flush() macro nonsense! Just instrument
> its constituent calls instead for radix and hash.
> Link: https://lore.kernel.org/linuxppc-dev/20231130025404.37179-2-rmclure@linux.ibm.com/
>
> v8:
> * Fix linux/page_table_check.h include in asm/pgtable.h breaking
> 32-bit.
> Link: https://lore.kernel.org/linuxppc-dev/20230215231153.2147454-1-rmclure@linux.ibm.com/
>
> v7:
> * Remove use of extern in set_pte prototypes
> * Clean up pmdp_collapse_flush macro
> * Replace set_pte_at with static inline function
> * Fix commit message for patch 7
> Link: https://lore.kernel.org/linuxppc-dev/20230215020155.1969194-1-rmclure@linux.ibm.com/
>
> v6:
> * Support huge pages and p{m,u}d accounting.
> * Remove instrumentation from set_pte from kernel internal pages.
> * 64s: Implement pmdp_collapse_flush in terms of __pmdp_collapse_flush
> as access to the mm_struct * is required.
> Link: https://lore.kernel.org/linuxppc-dev/20230214015939.1853438-1-rmclure@linux.ibm.com/
>
> v5:
> Link: https://lore.kernel.org/linuxppc-dev/20221118002146.25979-1-rmclure@linux.ibm.com/
>
> Rohan McLure (11):
> mm/page_table_check: Reinstate address parameter in
> [__]page_table_check_pud_set()
> mm/page_table_check: Reinstate address parameter in
> [__]page_table_check_pmd_set()
> mm/page_table_check: Provide addr parameter to
> page_table_check_pte_set()
> mm/page_table_check: Reinstate address parameter in
> [__]page_table_check_pud_clear()
> mm/page_table_check: Reinstate address parameter in
> [__]page_table_check_pmd_clear()
> mm/page_table_check: Reinstate address parameter in
> [__]page_table_check_pte_clear()
> mm: Provide address parameter to p{te,md,ud}_user_accessible_page()
> powerpc: mm: Add pud_pfn() stub
> powerpc: mm: Implement *_user_accessible_page() for ptes
> powerpc: mm: Use set_pte_at_unchecked() for internal usages
> powerpc: mm: Support page table check
>
> arch/arm64/include/asm/pgtable.h | 18 +++---
> arch/powerpc/Kconfig | 1 +
> arch/powerpc/include/asm/book3s/32/pgtable.h | 12 +++-
> arch/powerpc/include/asm/book3s/64/pgtable.h | 62 +++++++++++++++---
> arch/powerpc/include/asm/nohash/pgtable.h | 13 +++-
> arch/powerpc/include/asm/pgtable.h | 19 ++++++
> arch/powerpc/mm/book3s64/hash_pgtable.c | 4 ++
> arch/powerpc/mm/book3s64/pgtable.c | 17 +++--
> arch/powerpc/mm/book3s64/radix_pgtable.c | 9 ++-
> arch/powerpc/mm/pgtable.c | 12 ++++
> arch/riscv/include/asm/pgtable.h | 18 +++---
> arch/x86/include/asm/pgtable.h | 20 +++---
> include/linux/page_table_check.h | 67 ++++++++++++--------
> include/linux/pgtable.h | 10 +--
> mm/page_table_check.c | 39 +++++++-----
> 15 files changed, 225 insertions(+), 96 deletions(-)
>