* [PATCH 1/2] x86/mm: Kill a 32-bit #ifdef for shared PMD handling
2025-04-18 15:56 [PATCH 0/2] Minor fixups for PAE simplification Dave Hansen
@ 2025-04-18 15:56 ` Dave Hansen
2025-04-18 18:18 ` Edgecombe, Rick P
2025-04-18 15:56 ` [PATCH 2/2] x86/mm: Move duplicated 32-bit page table sync mask to common code Dave Hansen
1 sibling, 1 reply; 6+ messages in thread
From: Dave Hansen @ 2025-04-18 15:56 UTC (permalink / raw)
To: linux-kernel
Cc: x86, tglx, bp, joro, luto, peterz, kirill.shutemov,
rick.p.edgecombe, jgross, Dave Hansen
From: Dave Hansen <dave.hansen@linux.intel.com>
This block of code used to be:
if (SHARED_KERNEL_PMD)
But that check was zapped when 32-bit kernels transitioned to private
(non-shared) PMDs, which made it rather unclear what the block
of code is doing in the first place.
Remove the #ifdef and replace it with IS_ENABLED(). Unindent the
code block and add an actually useful comment about what it is
doing.
Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---
b/arch/x86/mm/pat/set_memory.c | 41 +++++++++++++++++++++--------------------
1 file changed, 21 insertions(+), 20 deletions(-)
diff -puN arch/x86/mm/pat/set_memory.c~kill-CONFIG_X86_32-ifdef arch/x86/mm/pat/set_memory.c
--- a/arch/x86/mm/pat/set_memory.c~kill-CONFIG_X86_32-ifdef 2025-04-18 08:37:32.149932662 -0700
+++ b/arch/x86/mm/pat/set_memory.c 2025-04-18 08:37:32.152932772 -0700
@@ -881,31 +881,32 @@ phys_addr_t slow_virt_to_phys(void *__vi
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
-/*
- * Set the new pmd in all the pgds we know about:
- */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
+ struct page *page;
+
/* change init_mm */
set_pte_atomic(kpte, pte);
-#ifdef CONFIG_X86_32
- {
- struct page *page;
-
- list_for_each_entry(page, &pgd_list, lru) {
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
- p4d = p4d_offset(pgd, address);
- pud = pud_offset(p4d, address);
- pmd = pmd_offset(pud, address);
- set_pte_atomic((pte_t *)pmd, pte);
- }
+
+ if (IS_ENABLED(CONFIG_X86_64))
+ return;
+
+ /*
+ * 32-bit mm_structs don't share kernel PMD pages.
+ * Propagate the change to each relevant PMD entry:
+ */
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ p4d = p4d_offset(pgd, address);
+ pud = pud_offset(p4d, address);
+ pmd = pmd_offset(pud, address);
+ set_pte_atomic((pte_t *)pmd, pte);
}
-#endif
}
static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
_
^ permalink raw reply [flat|nested] 6+ messages in thread* [PATCH 2/2] x86/mm: Move duplicated 32-bit page table sync mask to common code
2025-04-18 15:56 [PATCH 0/2] Minor fixups for PAE simplification Dave Hansen
2025-04-18 15:56 ` [PATCH 1/2] x86/mm: Kill a 32-bit #ifdef for shared PMD handling Dave Hansen
@ 2025-04-18 15:56 ` Dave Hansen
1 sibling, 0 replies; 6+ messages in thread
From: Dave Hansen @ 2025-04-18 15:56 UTC (permalink / raw)
To: linux-kernel
Cc: x86, tglx, bp, joro, luto, peterz, kirill.shutemov,
rick.p.edgecombe, jgross, Dave Hansen
From: Dave Hansen <dave.hansen@linux.intel.com>
The vmalloc() code needs to know when to propagate changes across
mms because the rules differ based on architecture and kernel config.
It does this with a bitmap: ARCH_PAGE_TABLE_SYNC_MASK which tells
vmalloc when to call back in to arch-specific code to propagate
page table changes.
The 32-bit x86 rules are less exciting now and are unified across
PAE (3-level) and 2-level paging, so the defines for the two paging
modes now specify the same value.
Move the common value to the common pgtable_32_types.h.
Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---
b/arch/x86/include/asm/pgtable-2level_types.h | 2 --
b/arch/x86/include/asm/pgtable-3level_types.h | 2 --
b/arch/x86/include/asm/pgtable_32_types.h | 2 ++
3 files changed, 2 insertions(+), 4 deletions(-)
diff -puN arch/x86/include/asm/pgtable-2level_types.h~move-ARCH_PAGE_TABLE_SYNC_MASK arch/x86/include/asm/pgtable-2level_types.h
--- a/arch/x86/include/asm/pgtable-2level_types.h~move-ARCH_PAGE_TABLE_SYNC_MASK 2025-04-18 08:37:32.631950373 -0700
+++ b/arch/x86/include/asm/pgtable-2level_types.h 2025-04-18 08:37:32.640950703 -0700
@@ -18,8 +18,6 @@ typedef union {
} pte_t;
#endif /* !__ASSEMBLER__ */
-#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
-
/*
* Traditional i386 two-level paging structure:
*/
diff -puN arch/x86/include/asm/pgtable_32_types.h~move-ARCH_PAGE_TABLE_SYNC_MASK arch/x86/include/asm/pgtable_32_types.h
--- a/arch/x86/include/asm/pgtable_32_types.h~move-ARCH_PAGE_TABLE_SYNC_MASK 2025-04-18 08:37:32.635950520 -0700
+++ b/arch/x86/include/asm/pgtable_32_types.h 2025-04-18 08:37:32.640950703 -0700
@@ -20,4 +20,6 @@
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
+
#endif /* _ASM_X86_PGTABLE_32_TYPES_H */
diff -puN arch/x86/include/asm/pgtable-3level_types.h~move-ARCH_PAGE_TABLE_SYNC_MASK arch/x86/include/asm/pgtable-3level_types.h
--- a/arch/x86/include/asm/pgtable-3level_types.h~move-ARCH_PAGE_TABLE_SYNC_MASK 2025-04-18 08:37:32.637950593 -0700
+++ b/arch/x86/include/asm/pgtable-3level_types.h 2025-04-18 08:37:32.640950703 -0700
@@ -27,8 +27,6 @@ typedef union {
} pmd_t;
#endif /* !__ASSEMBLER__ */
-#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
-
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
_
^ permalink raw reply [flat|nested] 6+ messages in thread