* [PATCH v1] arm: Support for the PXN CPU feature on ARMv7
@ 2014-11-22 10:42 Jungseung Lee
2014-11-24 15:06 ` Catalin Marinas
From: Jungseung Lee @ 2014-11-22 10:42 UTC
To: linux-arm-kernel
Modern ARMv7-A/R cores can optionally implement the following new
hardware feature:
- PXN:
Privileged execute-never (PXN) is a security feature: the PXN bit
determines whether the processor can execute software from the
region while in privileged mode. It is an effective mitigation
against ret2usr attacks.
On implementations without LPAE, PXN support is optional.
This patch sets the PXN bit in user page tables to prevent user
code from being executed in privileged mode.
Signed-off-by: Jungseung Lee <js07.lee@gmail.com>
---
Changes in v1:
- Define cpu_has_classic_pxn().
- Set the PXN bit directly in the PTE under LPAE.
arch/arm/include/asm/pgalloc.h | 20 +++++++++++++++++++-
arch/arm/include/asm/pgtable-2level-hwdef.h | 2 ++
arch/arm/include/asm/pgtable-3level-hwdef.h | 1 +
arch/arm/include/asm/pgtable.h | 3 +++
4 files changed, 25 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 78a7793..e63c633 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -17,6 +17,7 @@
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#include <asm/cputype.h>
#define check_pgt_cache() do { } while (0)
@@ -25,6 +26,19 @@
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
+static inline bool cpu_has_classic_pxn(void)
+{
+#ifdef CONFIG_CPU_V7
+ unsigned int vmsa;
+
+ /* LPAE implies atomic ldrd/strd instructions */
+ vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
+ if (vmsa == 4)
+ return 1;
+#endif
+ return 0;
+}
+
#ifdef CONFIG_ARM_LPAE
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -157,7 +171,11 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+ if (cpu_has_classic_pxn())
+ __pmd_populate(pmdp, page_to_phys(ptep),
+ _PAGE_USER_TABLE | PMD_PXNTABLE);
+ else
+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15..5e68278 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,12 +20,14 @@
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
*/
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 9fd61c7..f8f1cff 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -76,6 +76,7 @@
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 3b30062..04db03c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -247,6 +247,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (!pte_special(pteval))
__sync_icache_dcache(pteval);
ext |= PTE_EXT_NG;
+#ifdef CONFIG_ARM_LPAE
+ ext |= PTE_EXT_PXN;
+#endif
}
set_pte_ext(ptep, pteval, ext);
--
1.9.1
* [PATCH v1] arm: Support for the PXN CPU feature on ARMv7
2014-11-22 10:42 [PATCH v1] arm: Support for the PXN CPU feature on ARMv7 Jungseung Lee
@ 2014-11-24 15:06 ` Catalin Marinas
2014-11-24 15:26 ` Arnd Bergmann
2014-11-25 16:21 ` Jungseung Lee
From: Catalin Marinas @ 2014-11-24 15:06 UTC
To: linux-arm-kernel
On Sat, Nov 22, 2014 at 10:42:36AM +0000, Jungseung Lee wrote:
> diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
> index 78a7793..e63c633 100644
> --- a/arch/arm/include/asm/pgalloc.h
> +++ b/arch/arm/include/asm/pgalloc.h
> @@ -17,6 +17,7 @@
> #include <asm/processor.h>
> #include <asm/cacheflush.h>
> #include <asm/tlbflush.h>
> +#include <asm/cputype.h>
>
> #define check_pgt_cache() do { } while (0)
>
> @@ -25,6 +26,19 @@
> #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
> #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
>
> +static inline bool cpu_has_classic_pxn(void)
> +{
> +#ifdef CONFIG_CPU_V7
I don't think this should trigger when LPAE is enabled.
> + unsigned int vmsa;
> +
> + /* LPAE implies atomic ldrd/strd instructions */
How is this comment relevant? Copy/paste?
> + vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
> + if (vmsa == 4)
> + return 1;
> +#endif
> + return 0;
> +}
In general we put the #ifdef outside the function:
#if __LINUX_ARM_ARCH__ >= 7 && !defined(CONFIG_ARM_LPAE)
static inline bool cpu_has_classic_pxn(void)
{
	unsigned int vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	return vmsa == 4;
}
#else
static inline bool cpu_has_classic_pxn(void)
{
	return false;
}
#endif
I used __LINUX_ARM_ARCH__ instead of CONFIG_CPU_V7 to cope with kernel
images built for both v6 and v7. You could also check for
cpu_architecture() >= 7, though with a bit of performance impact.
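A rough sketch of that runtime variant, for illustration only (it assumes
cpu_architecture() from <asm/system_info.h> and the usual <asm/cputype.h>
helpers), might look like:

static inline bool cpu_has_classic_pxn(void)
{
	/* with LPAE, PXN is handled via the 3-level descriptor bit instead */
	if (IS_ENABLED(CONFIG_ARM_LPAE))
		return false;
	/* only probe ID_MMFR0 on v7 or later cores */
	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return false;
	/* ID_MMFR0.VMSA == 4 means VMSAv7 with the PXN bit */
	return (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) == 4;
}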
> static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
> @@ -157,7 +171,11 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
> static inline void
> pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
> {
> - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
> + if (cpu_has_classic_pxn())
> + __pmd_populate(pmdp, page_to_phys(ptep),
> + _PAGE_USER_TABLE | PMD_PXNTABLE);
> + else
> + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
> }
Nitpick (personal preference):
	pmdval_t pmdval = _PAGE_USER_TABLE;

	if (cpu_has_classic_pxn())
		pmdval |= PMD_PXNTABLE;
	__pmd_populate(pmdp, page_to_phys(ptep), pmdval);
> #define pmd_pgtable(pmd) pmd_page(pmd)
>
> diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
> index 5cfba15..5e68278 100644
> --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
> +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
> @@ -20,12 +20,14 @@
> #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
> #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
> #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
> +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
> #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
> #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
> #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
> /*
> * - section
> */
> +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
> #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
> #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
> #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
> diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
> index 9fd61c7..f8f1cff 100644
> --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
> +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
> @@ -76,6 +76,7 @@
> #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
> #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
> #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
> +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
> #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
>
> /*
> diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
> index 3b30062..04db03c 100644
> --- a/arch/arm/include/asm/pgtable.h
> +++ b/arch/arm/include/asm/pgtable.h
> @@ -247,6 +247,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
> if (!pte_special(pteval))
> __sync_icache_dcache(pteval);
> ext |= PTE_EXT_NG;
> +#ifdef CONFIG_ARM_LPAE
> + ext |= PTE_EXT_PXN;
> +#endif
I think you can avoid touching set_pte_at() and instead:
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e..5b0a0472e689 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -605,6 +605,11 @@ static void __init build_mem_type_table(void)
 	}
 	kern_pgprot |= PTE_EXT_AF;
 	vecs_pgprot |= PTE_EXT_AF;
+
+	/*
+	 * Set PXN for user mappings
+	 */
+	user_pgprot |= PTE_EXT_PXN;
 #endif
 
 	for (i = 0; i < 16; i++) {
--
Catalin
* [PATCH v1] arm: Support for the PXN CPU feature on ARMv7
2014-11-24 15:06 ` Catalin Marinas
@ 2014-11-24 15:26 ` Arnd Bergmann
2014-11-24 15:57 ` Catalin Marinas
2014-11-25 16:21 ` Jungseung Lee
From: Arnd Bergmann @ 2014-11-24 15:26 UTC
To: linux-arm-kernel
On Monday 24 November 2014 15:06:53 Catalin Marinas wrote:
> On Sat, Nov 22, 2014 at 10:42:36AM +0000, Jungseung Lee wrote:
> > diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
> In general we put the #ifdef outside the function:
>
> #if __LINUX_ARM_ARCH__ >= 7 && !defined(CONFIG_ARM_LPAE)
> static inline bool cpu_has_classic_pxn(void)
> {
> unsigned int vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
> return vmsa == 4;
> }
> #else
> static inline bool cpu_has_classic_pxn(void)
> {
> return false;
> }
> #endif
>
> I used __LINUX_ARM_ARCH__ instead of CONFIG_CPU_V7 to cope with kernel
> images built for both v6 and v7. You could also check for
> cpu_architecture() >= 7, though with a bit of performance impact.
Regarding the style, I think the best way to express it is
	if (__LINUX_ARM_ARCH__ >= 7 && !IS_ENABLED(CONFIG_ARM_LPAE) && (vmsa == 4))
		return true;
	return false;
This way you can avoid the #ifdef and still get the compile-time
optimization.
Checking for (__LINUX_ARM_ARCH__ >= 6 && cpu_architecture() >= 7) would
also make it work for a combined armv6/v7 kernel.
Arnd
* [PATCH v1] arm: Support for the PXN CPU feature on ARMv7
2014-11-24 15:26 ` Arnd Bergmann
@ 2014-11-24 15:57 ` Catalin Marinas
From: Catalin Marinas @ 2014-11-24 15:57 UTC
To: linux-arm-kernel
On Mon, Nov 24, 2014 at 03:26:10PM +0000, Arnd Bergmann wrote:
> On Monday 24 November 2014 15:06:53 Catalin Marinas wrote:
> > On Sat, Nov 22, 2014 at 10:42:36AM +0000, Jungseung Lee wrote:
> > > diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
>
> > In general we put the #ifdef outside the function:
> >
> > #if __LINUX_ARM_ARCH__ >= 7 && !defined(CONFIG_ARM_LPAE)
> > static inline bool cpu_has_classic_pxn(void)
> > {
> > unsigned int vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
> > return vmsa == 4;
> > }
> > #else
> > static inline bool cpu_has_classic_pxn(void)
> > {
> > return false;
> > }
> > #endif
> >
> > I used __LINUX_ARM_ARCH__ instead of CONFIG_CPU_V7 to cope with kernel
> > images built for both v6 and v7. You could also check for
> > cpu_architecture() >= 7, though with a bit of performance impact.
>
> Regarding the style, I think the best way to express it is
>
> if (__LINUX_ARM_ARCH__ >= 7 && !IS_ENABLED(CONFIG_ARM_LPAE) && (vmsa == 4))
> return true;
> return false;
We want to avoid read_cpuid_ext() if pre-v7. But something along these
lines would work.
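Putting the two points together, one possible shape (just a sketch) is:

static inline bool cpu_has_classic_pxn(void)
{
	/*
	 * Both tests fold to compile-time constants, so the ID_MMFR0
	 * read is optimised away on pre-v7 and LPAE configurations.
	 */
	if (__LINUX_ARM_ARCH__ >= 7 && !IS_ENABLED(CONFIG_ARM_LPAE))
		return (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) == 4;
	return false;
}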
> This way you can avoid the #ifdef and still get the compile-time
> optimization.
>
> Checking for (__LINUX_ARM_ARCH__ >= 6 && cpu_architecture() >= 7) would
> also make it work for a combined armv6/v7 kernel.
Yes.
--
Catalin
* [PATCH v1] arm: Support for the PXN CPU feature on ARMv7
2014-11-24 15:06 ` Catalin Marinas
2014-11-24 15:26 ` Arnd Bergmann
@ 2014-11-25 16:21 ` Jungseung Lee
From: Jungseung Lee @ 2014-11-25 16:21 UTC
To: linux-arm-kernel
Hello Catalin,
2014-11-25 0:06 GMT+09:00 Catalin Marinas <catalin.marinas@arm.com>:
> On Sat, Nov 22, 2014 at 10:42:36AM +0000, Jungseung Lee wrote:
>> diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
>> index 78a7793..e63c633 100644
>> --- a/arch/arm/include/asm/pgalloc.h
>> +++ b/arch/arm/include/asm/pgalloc.h
>> @@ -17,6 +17,7 @@
>> #include <asm/processor.h>
>> #include <asm/cacheflush.h>
>> #include <asm/tlbflush.h>
>> +#include <asm/cputype.h>
>>
>> #define check_pgt_cache() do { } while (0)
>>
>> @@ -25,6 +26,19 @@
>> #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
>> #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
>>
>> +static inline bool cpu_has_classic_pxn(void)
>> +{
>> +#ifdef CONFIG_CPU_V7
>
> I don't think this should trigger when LPAE is enabled.
>
>> + unsigned int vmsa;
>> +
>> + /* LPAE implies atomic ldrd/strd instructions */
>
> How is this comment relevant? Copy/paste?
>
I am ashamed of the code... I will fix that.
>> + vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
>> + if (vmsa == 4)
>> + return 1;
>> +#endif
>> + return 0;
>> +}
>
> In general we put the #ifdef outside the function:
>
> #if __LINUX_ARM_ARCH__ >= 7 && !defined(CONFIG_ARM_LPAE)
> static inline bool cpu_has_classic_pxn(void)
> {
> unsigned int vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
> return vmsa == 4;
> }
> #else
> static inline bool cpu_has_classic_pxn(void)
> {
> return false;
> }
> #endif
>
Is it acceptable to use a static local variable to store vmsa?
I think it would be better to check vmsa just once.
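For example, something like this (just a sketch; the arch/LPAE checks
discussed above are left out):

static inline bool cpu_has_classic_pxn(void)
{
	static int vmsa = -1;	/* -1 means not read yet */

	/* read ID_MMFR0 only on the first call */
	if (vmsa < 0)
		vmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf;
	return vmsa == 4;
}

Each file including the header would keep its own copy of the cached
value, since this is a static inline in a header.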
> I used __LINUX_ARM_ARCH__ instead of CONFIG_CPU_V7 to cope with kernel
> images built for both v6 and v7. You could also check for
> cpu_architecture() >= 7, though with a bit of performance impact.
>
>> static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
>> @@ -157,7 +171,11 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
>> static inline void
>> pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
>> {
>> - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
>> + if (cpu_has_classic_pxn())
>> + __pmd_populate(pmdp, page_to_phys(ptep),
>> + _PAGE_USER_TABLE | PMD_PXNTABLE);
>> + else
>> + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
>> }
>
> Nitpick (personal preference):
>
> pmdval_t pmdval = _PAGE_USER_TABLE;
> if (cpu_has_classic_pxn())
> pmdval |= PMD_PXNTABLE;
> __pmd_populate(pmdp, page_to_phys(ptep), pmdval);
>
>> #define pmd_pgtable(pmd) pmd_page(pmd)
>>
>> diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
>> index 5cfba15..5e68278 100644
>> --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
>> +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
>> @@ -20,12 +20,14 @@
>> #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
>> #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
>> #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
>> +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
>> #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
>> #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
>> #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
>> /*
>> * - section
>> */
>> +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
>> #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
>> #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
>> #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
>> diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
>> index 9fd61c7..f8f1cff 100644
>> --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
>> +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
>> @@ -76,6 +76,7 @@
>> #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
>> #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
>> #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
>> +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
>> #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
>>
>> /*
>> diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
>> index 3b30062..04db03c 100644
>> --- a/arch/arm/include/asm/pgtable.h
>> +++ b/arch/arm/include/asm/pgtable.h
>> @@ -247,6 +247,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
>> if (!pte_special(pteval))
>> __sync_icache_dcache(pteval);
>> ext |= PTE_EXT_NG;
>> +#ifdef CONFIG_ARM_LPAE
>> + ext |= PTE_EXT_PXN;
>> +#endif
>
> I think you can avoid touching set_pte_at() and instead:
>
> diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
> index 9f98cec7fe1e..5b0a0472e689 100644
> --- a/arch/arm/mm/mmu.c
> +++ b/arch/arm/mm/mmu.c
> @@ -605,6 +605,11 @@ static void __init build_mem_type_table(void)
> }
> kern_pgprot |= PTE_EXT_AF;
> vecs_pgprot |= PTE_EXT_AF;
> +
> + /*
> + * Set PXN for user mappings
> + */
> + user_pgprot |= PTE_EXT_PXN;
> #endif
>
> for (i = 0; i < 16; i++) {
>
> --
> Catalin
Thanks for the review.