* [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
@ 2022-09-21 2:02 Nicholas Miehlbradt
2022-09-21 2:02 ` [PATCH v2 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Nicholas Miehlbradt
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-21 2:02 UTC
To: linuxppc-dev; +Cc: Nicholas Miehlbradt
There is support for DEBUG_PAGEALLOC on hash but not on radix.
Add support on radix.
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
v2: Revert change to radix_mem_block_size; instead set the size
in radix_init_pgtable and radix__create_section_mapping directly.
---
arch/powerpc/mm/book3s64/radix_pgtable.c | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index db2f3d193448..623455c195d8 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -30,6 +30,7 @@
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>
+#include <asm/set_memory.h>
#include <trace/events/thp.h>
@@ -332,6 +333,10 @@ static void __init radix_init_pgtable(void)
unsigned long rts_field;
phys_addr_t start, end;
u64 i;
+ unsigned long size = radix_mem_block_size;
+
+ if (debug_pagealloc_enabled())
+ size = PAGE_SIZE;
/* We don't support slb for radix */
slb_set_size(0);
@@ -352,7 +357,7 @@ static void __init radix_init_pgtable(void)
}
WARN_ON(create_physical_mapping(start, end,
- radix_mem_block_size,
+ size,
-1, PAGE_KERNEL));
}
@@ -844,13 +849,18 @@ int __meminit radix__create_section_mapping(unsigned long start,
unsigned long end, int nid,
pgprot_t prot)
{
+ unsigned long size = radix_mem_block_size;
+
+ if (debug_pagealloc_enabled())
+ size = PAGE_SIZE;
+
if (end >= RADIX_VMALLOC_START) {
pr_warn("Outside the supported range\n");
return -1;
}
return create_physical_mapping(__pa(start), __pa(end),
- radix_mem_block_size, nid, prot);
+ size, nid, prot);
}
int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
@@ -899,7 +909,14 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
#ifdef CONFIG_DEBUG_PAGEALLOC
void radix__kernel_map_pages(struct page *page, int numpages, int enable)
{
- pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
+ unsigned long addr;
+
+ addr = (unsigned long)page_address(page);
+
+ if (enable)
+ set_memory_p(addr, numpages);
+ else
+ set_memory_np(addr, numpages);
}
#endif
--
2.34.1
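A note for readers: the mechanism this patch wires up is small but easy to miss in the diff. Below is a minimal, illustrative sketch (not part of the patch) of what the new radix hook enables, assuming only the generic __kernel_map_pages() entry point:

/* Illustrative sketch: how DEBUG_PAGEALLOC uses the new radix hook. */
#include <linux/mm.h>

static void debug_pagealloc_sketch(struct page *page)
{
	/*
	 * On free, the page allocator calls __kernel_map_pages(page, 1, 0).
	 * On radix this now reaches radix__kernel_map_pages(), which
	 * clears the linear-map PTE via set_memory_np().
	 */
	__kernel_map_pages(page, 1, 0);

	/*
	 * A stale access through the linear map would now fault:
	 *   *(volatile int *)page_address(page);   <-- would oops here
	 */

	/* On re-allocation the page is mapped back in via set_memory_p(). */
	__kernel_map_pages(page, 1, 1);
}

This is also why radix_init_pgtable() above drops to PAGE_SIZE mappings when DEBUG_PAGEALLOC is enabled: a page inside a 1G or 2M block mapping could not be unmapped individually.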
* [PATCH v2 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils
2022-09-21 2:02 [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
@ 2022-09-21 2:02 ` Nicholas Miehlbradt
2022-09-21 2:02 ` [PATCH v2 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page() Nicholas Miehlbradt
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-21 2:02 UTC
To: linuxppc-dev; +Cc: Nicholas Miehlbradt
From: Christophe Leroy <christophe.leroy@csgroup.eu>
debug_pagealloc_enabled() is always defined and constant folds to
'false' when CONFIG_DEBUG_PAGEALLOC is not enabled.
Remove the #ifdefs; the code and associated static variables will
be optimised out by the compiler when CONFIG_DEBUG_PAGEALLOC is
not defined.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
arch/powerpc/mm/book3s64/hash_utils.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index fc92613dc2bf..e63ff401a6ea 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -123,11 +123,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
-static DEFINE_SPINLOCK(linear_map_hash_lock);
-#endif /* CONFIG_DEBUG_PAGEALLOC */
struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);
@@ -427,11 +424,9 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
break;
cond_resched();
-#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
}
return ret < 0 ? ret : 0;
}
@@ -1066,7 +1061,6 @@ static void __init htab_initialize(void)
prot = pgprot_val(PAGE_KERNEL);
-#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled()) {
linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
linear_map_hash_slots = memblock_alloc_try_nid(
@@ -1076,7 +1070,6 @@ static void __init htab_initialize(void)
panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
__func__, linear_map_hash_count, &ppc64_rma_size);
}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
/* create bolted the linear mapping in the hash table */
for_each_mem_range(i, &base, &end) {
@@ -1991,6 +1984,8 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
}
#ifdef CONFIG_DEBUG_PAGEALLOC
+static DEFINE_SPINLOCK(linear_map_hash_lock);
+
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash;
--
2.34.1
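For reference, the idiom this cleanup leans on has roughly the following shape (a paraphrase of the include/linux/mm.h definition, not a verbatim copy):

/* Paraphrase of the constant-folding idiom; names are approximate. */
static inline bool debug_pagealloc_enabled(void)
{
	/* IS_ENABLED() expands to a compile-time constant 0 or 1. */
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
	       _debug_pagealloc_enabled_early;
}

With CONFIG_DEBUG_PAGEALLOC=n the condition is constant false, so the guarded stores to linear_map_hash_slots[] are dead code and the now-unreferenced static variables can be discarded by the compiler, with no preprocessor guards required.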
* [PATCH v2 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page()
2022-09-21 2:02 [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
2022-09-21 2:02 ` [PATCH v2 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Nicholas Miehlbradt
@ 2022-09-21 2:02 ` Nicholas Miehlbradt
2022-09-21 2:02 ` [PATCH v2 4/4] powerpc/64s: Enable KFENCE on book3s64 Nicholas Miehlbradt
2022-09-21 3:31 ` [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Michael Ellerman
3 siblings, 0 replies; 7+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-21 2:02 UTC
To: linuxppc-dev; +Cc: Nicholas Miehlbradt
From: Christophe Leroy <christophe.leroy@csgroup.eu>
If the page is already mapped (when mapping), or already unmapped
(when unmapping), bail out.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index e63ff401a6ea..b37412fe5930 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2000,6 +2000,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
if (!vsid)
return;
+ if (linear_map_hash_slots[lmi] & 0x80)
+ return;
+
ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
@@ -2019,7 +2022,10 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
spin_lock(&linear_map_hash_lock);
- BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+ if (!(linear_map_hash_slots[lmi] & 0x80)) {
+ spin_unlock(&linear_map_hash_lock);
+ return;
+ }
hidx = linear_map_hash_slots[lmi] & 0x7f;
linear_map_hash_slots[lmi] = 0;
spin_unlock(&linear_map_hash_lock);
--
2.34.1
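A sketch of the encoding these checks rely on: each linear_map_hash_slots[] entry packs a "mapped" flag into bit 7 (the 0x80 tested above) and the HPTE slot index into the low 7 bits. The names below are hypothetical, introduced only for illustration:

#include <linux/types.h>

/* Hypothetical names; the code itself uses raw 0x80 / 0x7f. */
#define LMI_MAPPED	0x80	/* bit 7: page has a bolted HPTE */
#define LMI_HIDX_MASK	0x7f	/* low 7 bits: hash slot index */

static u8 slots[16];		/* stand-in for linear_map_hash_slots */

static bool sketch_map(unsigned long lmi, u8 hidx)
{
	if (slots[lmi] & LMI_MAPPED)
		return false;	/* already mapped: bail out */
	slots[lmi] = LMI_MAPPED | (hidx & LMI_HIDX_MASK);
	return true;
}

static bool sketch_unmap(unsigned long lmi)
{
	if (!(slots[lmi] & LMI_MAPPED))
		return false;	/* already unmapped: bail, not BUG_ON */
	slots[lmi] = 0;
	return true;
}

Tolerating the repeated calls matters for the next patch, where KFENCE drives the same map/unmap helpers as DEBUG_PAGEALLOC and can legitimately request a transition that has already happened.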
* [PATCH v2 4/4] powerpc/64s: Enable KFENCE on book3s64
2022-09-21 2:02 [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
2022-09-21 2:02 ` [PATCH v2 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Nicholas Miehlbradt
2022-09-21 2:02 ` [PATCH v2 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page() Nicholas Miehlbradt
@ 2022-09-21 2:02 ` Nicholas Miehlbradt
2022-09-21 7:07 ` Christophe Leroy
2022-09-21 3:31 ` [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Michael Ellerman
3 siblings, 1 reply; 7+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-21 2:02 UTC
To: linuxppc-dev; +Cc: Nicholas Miehlbradt
KFENCE support was added for ppc32 in commit 90cbac0e995d
("powerpc: Enable KFENCE for PPC32").
Enable KFENCE on ppc64 architecture with hash and radix MMUs.
It uses the same mechanism as debug pagealloc to
protect/unprotect pages. All KFENCE kunit tests pass on both
MMUs.
KFENCE memory is initially allocated using memblock but is
later marked as SLAB allocated. This necessitates the change
to __pud_free to ensure that the KFENCE pages are freed
appropriately.
Based on previous work by Christophe Leroy and Jordan Niethe.
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
v2: Refactor
---
arch/powerpc/Kconfig | 2 +-
arch/powerpc/include/asm/book3s/64/pgalloc.h | 6 ++++--
arch/powerpc/include/asm/book3s/64/pgtable.h | 2 +-
arch/powerpc/include/asm/kfence.h | 15 +++++++++++++++
arch/powerpc/mm/book3s64/hash_utils.c | 10 +++++-----
arch/powerpc/mm/book3s64/radix_pgtable.c | 8 +++++---
6 files changed, 31 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a4f8a5276e5c..f7dd0f49510d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -194,7 +194,7 @@ config PPC
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN if PPC_RADIX_MMU
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
- select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x
+ select HAVE_ARCH_KFENCE if ARCH_SUPPORTS_DEBUG_PAGEALLOC
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index e1af0b394ceb..dd2cff53a111 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
/*
* Early pud pages allocated via memblock allocator
- * can't be directly freed to slab
+ * can't be directly freed to slab. KFENCE pages have
+ * both reserved and slab flags set, so they need to be freed
+ * via kmem_cache_free().
*/
- if (PageReserved(page))
+ if (PageReserved(page) && !PageSlab(page))
free_reserved_page(page);
else
kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index cb9d5fd39d7f..fd5d800f2836 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1123,7 +1123,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
}
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (radix_enabled())
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index a9846b68c6b9..cff60983e88d 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -11,11 +11,25 @@
#include <linux/mm.h>
#include <asm/pgtable.h>
+#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC64_ELF_ABI_V2)
+#define ARCH_FUNC_PREFIX "."
+#endif
+
static inline bool arch_kfence_init_pool(void)
{
return true;
}
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ struct page *page = virt_to_page(addr);
+
+ __kernel_map_pages(page, 1, !protect);
+
+ return true;
+}
+#else
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +43,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
}
+#endif
#endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b37412fe5930..9cceaa5998a3 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -424,7 +424,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
break;
cond_resched();
- if (debug_pagealloc_enabled() &&
+ if (debug_pagealloc_enabled_or_kfence() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
}
@@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void)
bool aligned = true;
init_hpte_page_sizes();
- if (!debug_pagealloc_enabled()) {
+ if (!debug_pagealloc_enabled_or_kfence()) {
/*
* Pick a size for the linear mapping. Currently, we only
* support 16M, 1M and 4K which is the default
@@ -1061,7 +1061,7 @@ static void __init htab_initialize(void)
prot = pgprot_val(PAGE_KERNEL);
- if (debug_pagealloc_enabled()) {
+ if (debug_pagealloc_enabled_or_kfence()) {
linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
linear_map_hash_slots = memblock_alloc_try_nid(
linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -1983,7 +1983,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
return slot;
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static DEFINE_SPINLOCK(linear_map_hash_lock);
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2056,7 +2056,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
}
local_irq_restore(flags);
}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 623455c195d8..e88f9c45f34a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -34,6 +34,8 @@
#include <trace/events/thp.h>
+#include <mm/mmu_decl.h>
+
unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;
@@ -335,7 +337,7 @@ static void __init radix_init_pgtable(void)
u64 i;
unsigned long size = radix_mem_block_size;
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_or_kfence())
size = PAGE_SIZE;
/* We don't support slb for radix */
@@ -851,7 +853,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
{
unsigned long size = radix_mem_block_size;
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_or_kfence())
size = PAGE_SIZE;
if (end >= RADIX_VMALLOC_START) {
@@ -906,7 +908,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
#endif
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
void radix__kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long addr;
--
2.34.1
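One detail worth unpacking: ARCH_FUNC_PREFIX exists because ELF ABI v1 (big-endian ppc64) function symbols carry a leading dot, and the KFENCE core matches symbol names when trimming its own frames from reported stack traces. Roughly how the core consumes the define (paraphrased from mm/kfence/report.c, not a verbatim copy):

/* Paraphrase of how the KFENCE core consumes ARCH_FUNC_PREFIX. */
#include <linux/string.h>

#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif

static bool is_kfence_frame(const char *sym)
{
	/* ".kfence_..." under ELF ABI v1, "kfence_..." elsewhere. */
	return str_has_prefix(sym, ARCH_FUNC_PREFIX "kfence_") ||
	       str_has_prefix(sym, ARCH_FUNC_PREFIX "__kfence_");
}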
* Re: [PATCH v2 4/4] powerpc/64s: Enable KFENCE on book3s64
2022-09-21 2:02 ` [PATCH v2 4/4] powerpc/64s: Enable KFENCE on book3s64 Nicholas Miehlbradt
@ 2022-09-21 7:07 ` Christophe Leroy
0 siblings, 0 replies; 7+ messages in thread
From: Christophe Leroy @ 2022-09-21 7:07 UTC
To: Nicholas Miehlbradt, linuxppc-dev@lists.ozlabs.org
On 21/09/2022 at 04:02, Nicholas Miehlbradt wrote:
> KFENCE support was added for ppc32 in commit 90cbac0e995d
> ("powerpc: Enable KFENCE for PPC32").
> Enable KFENCE on ppc64 architecture with hash and radix MMUs.
> It uses the same mechanism as debug pagealloc to
> protect/unprotect pages. All KFENCE kunit tests pass on both
> MMUs.
>
> KFENCE memory is initially allocated using memblock but is
> later marked as SLAB allocated. This necessitates the change
> to __pud_free to ensure that the KFENCE pages are freed
> appropriately.
>
> Based on previous work by Christophe Leroy and Jordan Niethe.
>
> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
> ---
> v2: Refactor
> ---
> arch/powerpc/Kconfig | 2 +-
> arch/powerpc/include/asm/book3s/64/pgalloc.h | 6 ++++--
> arch/powerpc/include/asm/book3s/64/pgtable.h | 2 +-
> arch/powerpc/include/asm/kfence.h | 15 +++++++++++++++
> arch/powerpc/mm/book3s64/hash_utils.c | 10 +++++-----
> arch/powerpc/mm/book3s64/radix_pgtable.c | 8 +++++---
> 6 files changed, 31 insertions(+), 12 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
> index a9846b68c6b9..cff60983e88d 100644
> --- a/arch/powerpc/include/asm/kfence.h
> +++ b/arch/powerpc/include/asm/kfence.h
> @@ -11,11 +11,25 @@
> #include <linux/mm.h>
> #include <asm/pgtable.h>
>
> +#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC64_ELF_ABI_V2)
CONFIG_PPC64 && !CONFIG_PPC64_ELF_ABI_V2
is the same as
CONFIG_PPC64_ELF_ABI_V1
> +#define ARCH_FUNC_PREFIX "."
> +#endif
> +
Christophe
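That is, the guard could be collapsed to a single condition, sketched below:

/* Sketch of the simplification suggested above. */
#ifdef CONFIG_PPC64_ELF_ABI_V1
#define ARCH_FUNC_PREFIX "."
#endif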
* Re: [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
2022-09-21 2:02 [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
` (2 preceding siblings ...)
2022-09-21 2:02 ` [PATCH v2 4/4] powerpc/64s: Enable KFENCE on book3s64 Nicholas Miehlbradt
@ 2022-09-21 3:31 ` Michael Ellerman
3 siblings, 0 replies; 7+ messages in thread
From: Michael Ellerman @ 2022-09-21 3:31 UTC
To: Nicholas Miehlbradt, linuxppc-dev; +Cc: Nicholas Miehlbradt
Nicholas Miehlbradt <nicholas@linux.ibm.com> writes:
> There is support for DEBUG_PAGEALLOC on hash but not on radix.
> Add support on radix.
>
> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
> ---
> v2: Revert change to radix_mem_block_size; instead set the size
> in radix_init_pgtable and radix__create_section_mapping directly.
> ---
> arch/powerpc/mm/book3s64/radix_pgtable.c | 23 ++++++++++++++++++++---
> 1 file changed, 20 insertions(+), 3 deletions(-)
>
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index db2f3d193448..623455c195d8 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -30,6 +30,7 @@
> #include <asm/trace.h>
> #include <asm/uaccess.h>
> #include <asm/ultravisor.h>
> +#include <asm/set_memory.h>
>
> #include <trace/events/thp.h>
>
> @@ -332,6 +333,10 @@ static void __init radix_init_pgtable(void)
> unsigned long rts_field;
> phys_addr_t start, end;
> u64 i;
> + unsigned long size = radix_mem_block_size;
> +
> + if (debug_pagealloc_enabled())
> + size = PAGE_SIZE;
>
> /* We don't support slb for radix */
> slb_set_size(0);
> @@ -352,7 +357,7 @@ static void __init radix_init_pgtable(void)
> }
>
> WARN_ON(create_physical_mapping(start, end,
> - radix_mem_block_size,
> + size,
> -1, PAGE_KERNEL));
> }
There are only two calls to create_physical_mapping().
Both pass the same value for size, and now both needed updating for
debug page alloc.
Seems like create_physical_mapping() doesn't need the size passed to it
at all; it may as well just use the right value.
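A sketch of that suggestion, using a hypothetical helper name — create_physical_mapping() would pick the size itself and both callers would stop passing one:

/* Hypothetical refactor along the lines suggested above. */
static unsigned long radix_mapping_size(void)
{
	if (debug_pagealloc_enabled())
		return PAGE_SIZE;
	return radix_mem_block_size;
}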
> @@ -844,13 +849,18 @@ int __meminit radix__create_section_mapping(unsigned long start,
> unsigned long end, int nid,
> pgprot_t prot)
> {
> + unsigned long size = radix_mem_block_size;
> +
> + if (debug_pagealloc_enabled())
> + size = PAGE_SIZE;
> +
> if (end >= RADIX_VMALLOC_START) {
> pr_warn("Outside the supported range\n");
> return -1;
> }
>
> return create_physical_mapping(__pa(start), __pa(end),
> - radix_mem_block_size, nid, prot);
> + size, nid, prot);
> }
cheers
* [PATCH v2 1/4] powerpc: Enable KFENCE for PPC32
@ 2021-03-04 14:35 Christophe Leroy
2021-03-04 14:35 ` [PATCH v2 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page() Christophe Leroy
0 siblings, 1 reply; 7+ messages in thread
From: Christophe Leroy @ 2021-03-04 14:35 UTC
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
Alexander Potapenko, Marco Elver, Dmitry Vyukov
Cc: linuxppc-dev, linux-kernel, kasan-dev
Add architecture specific implementation details for KFENCE and enable
KFENCE for the ppc32 architecture. In particular, this implements the
required interface in <asm/kfence.h>.
KFENCE requires that attributes for pages from its memory pool can
individually be set. Therefore, force the Read/Write linear map to be
mapped at page granularity.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Marco Elver <elver@google.com>
---
v2: Added debug_pagealloc_enabled_or_kfence()
---
arch/powerpc/Kconfig | 13 ++++++------
arch/powerpc/include/asm/kfence.h | 33 +++++++++++++++++++++++++++++++
arch/powerpc/mm/book3s32/mmu.c | 2 +-
arch/powerpc/mm/fault.c | 7 ++++++-
arch/powerpc/mm/init_32.c | 3 +++
arch/powerpc/mm/mmu_decl.h | 5 +++++
arch/powerpc/mm/nohash/8xx.c | 4 ++--
7 files changed, 57 insertions(+), 10 deletions(-)
create mode 100644 arch/powerpc/include/asm/kfence.h
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 386ae12d8523..d46db0bfb998 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -185,6 +185,7 @@ config PPC
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KFENCE if PPC32
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_NVRAM_OPS
@@ -786,7 +787,7 @@ config THREAD_SHIFT
config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
- depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
+ depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
help
This option allows you to set the kernel data alignment. When
@@ -798,13 +799,13 @@ config DATA_SHIFT_BOOL
config DATA_SHIFT
int "Data shift" if DATA_SHIFT_BOOL
default 24 if STRICT_KERNEL_RWX && PPC64
- range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32
- range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx
+ range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
+ range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32
+ default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
- default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA
- default 19 if DEBUG_PAGEALLOC && PPC_8xx
+ default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
+ default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
new file mode 100644
index 000000000000..a9846b68c6b9
--- /dev/null
+++ b/arch/powerpc/include/asm/kfence.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * powerpc KFENCE support.
+ *
+ * Copyright (C) 2020 CS GROUP France
+ */
+
+#ifndef __ASM_POWERPC_KFENCE_H
+#define __ASM_POWERPC_KFENCE_H
+
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *kpte = virt_to_kpte(addr);
+
+ if (protect) {
+ pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ } else {
+ pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
+ }
+
+ return true;
+}
+
+#endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index d7eb266a3f7a..a0db398b5c26 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -162,7 +162,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
- if (debug_pagealloc_enabled() || __map_without_bats) {
+ if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
pr_debug_once("Read-Write memory mapped without BATs\n");
if (base >= border)
return base;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index bb368257b55c..bea13682c909 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,6 +32,7 @@
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
+#include <linux/kfence.h>
#include <asm/firmware.h>
#include <asm/interrupt.h>
@@ -418,8 +419,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
* take a page fault to a kernel address or a page fault to a user
* address outside of dedicated places
*/
- if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
+ if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
+ if (kfence_handle_page_fault(address, is_write, regs))
+ return 0;
+
return SIGSEGV;
+ }
/*
* If we're in an interrupt, have no user context or are running
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 02c7db4087cb..3d690be48e84 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -97,6 +97,9 @@ static void __init MMU_setup(void)
if (IS_ENABLED(CONFIG_PPC_8xx))
return;
+ if (IS_ENABLED(CONFIG_KFENCE))
+ __map_without_ltlbs = 1;
+
if (debug_pagealloc_enabled())
__map_without_ltlbs = 1;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 998810e68562..7dac910c0b21 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -185,3 +185,8 @@ void ptdump_check_wx(void);
#else
static inline void ptdump_check_wx(void) { }
#endif
+
+static inline bool debug_pagealloc_enabled_or_kfence(void)
+{
+ return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
+}
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 19a3eec1d8c5..71bfdbedacee 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -149,7 +149,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
unsigned long sinittext = __pa(_sinittext);
- bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
+ bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
unsigned long boundary = strict_boundary ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
@@ -161,7 +161,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return 0;
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
- if (debug_pagealloc_enabled()) {
+ if (debug_pagealloc_enabled_or_kfence()) {
top = boundary;
} else {
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
--
2.25.0
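For reference, the arch hook defined above is driven by the KFENCE core roughly as follows (paraphrased from mm/kfence/core.c; the real code additionally wraps the calls in KFENCE_WARN_ON):

/* Paraphrase of the mm/kfence/core.c callers of the arch hook. */
static bool kfence_protect(unsigned long addr)
{
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
}

static bool kfence_unprotect(unsigned long addr)
{
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
}

This contract is why the commit message insists on page-granularity mappings for the Read/Write linear map: toggling _PAGE_PRESENT on a single guard page is only possible when that page has its own PTE.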
* [PATCH v2 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page()
2021-03-04 14:35 [PATCH v2 1/4] powerpc: Enable KFENCE for PPC32 Christophe Leroy
@ 2021-03-04 14:35 ` Christophe Leroy
0 siblings, 0 replies; 7+ messages in thread
From: Christophe Leroy @ 2021-03-04 14:35 UTC
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
Alexander Potapenko, Marco Elver, Dmitry Vyukov
Cc: linuxppc-dev, linux-kernel, kasan-dev
If the page is already mapped (when mapping), or already unmapped
(when unmapping), bail out.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
v2: New
---
arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index f1b5a5f1d3a9..cb09a49be798 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1944,6 +1944,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
if (!vsid)
return;
+ if (linear_map_hash_slots[lmi] & 0x80)
+ return;
+
ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
@@ -1963,7 +1966,10 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
spin_lock(&linear_map_hash_lock);
- BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+ if (!(linear_map_hash_slots[lmi] & 0x80)) {
+ spin_unlock(&linear_map_hash_lock);
+ return;
+ }
hidx = linear_map_hash_slots[lmi] & 0x7f;
linear_map_hash_slots[lmi] = 0;
spin_unlock(&linear_map_hash_lock);
--
2.25.0