* [RFC V1 05/31] arm64/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
       [not found] <1643029028-12710-1-git-send-email-anshuman.khandual@arm.com>
@ 2022-01-24 12:56 ` Anshuman Khandual
  2022-01-24 12:56 ` [RFC V1 09/31] arm/mm: " Anshuman Khandual
  1 sibling, 0 replies; 4+ messages in thread
From: Anshuman Khandual @ 2022-01-24 12:56 UTC
  To: linux-mm
  Cc: linux-kernel, hch, akpm, Anshuman Khandual, Catalin Marinas,
	Will Deacon, linux-arm-kernel

This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and __PXXX
macros, which are no longer needed, can be dropped. This also localizes both
the arch_filter_pgprot() and arch_vm_get_page_prot() helpers, unsubscribing
from ARCH_HAS_FILTER_PGPROT as well.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/Kconfig                    |  2 +-
 arch/arm64/include/asm/mman.h         |  3 +-
 arch/arm64/include/asm/pgtable-prot.h | 18 ----------
 arch/arm64/include/asm/pgtable.h      |  2 +-
 arch/arm64/mm/mmap.c                  | 50 +++++++++++++++++++++++++++
 5 files changed, 53 insertions(+), 22 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index cad609528e58..fce2d0fc4ecc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,7 +23,6 @@ config ARM64
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_FAST_MULTIPLIER
-	select ARCH_HAS_FILTER_PGPROT
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_GIGANTIC_PAGE
@@ -44,6 +43,7 @@ config ARM64
 	select ARCH_HAS_SYSCALL_WRAPPER
 	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HAS_ZONE_DMA_SET if EXPERT
 	select ARCH_HAVE_ELF_PROT
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index e3e28f7daf62..85f41f72a8b3 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -35,7 +35,7 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
 }
 #define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
 
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t arm64_arch_vm_get_page_prot(unsigned long vm_flags)
 {
 	pteval_t prot = 0;
 
@@ -57,7 +57,6 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 
 	return __pgprot(prot);
 }
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
 
 static inline bool arch_validate_prot(unsigned long prot,
 	unsigned long addr __always_unused)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 7032f04c8ac6..d8ee0aa7886d 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -88,24 +88,6 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
-#define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_READONLY
-#define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
-#define __P101  PAGE_READONLY_EXEC
-#define __P110  PAGE_READONLY_EXEC
-#define __P111  PAGE_READONLY_EXEC
-
-#define __S000  PAGE_NONE
-#define __S001  PAGE_READONLY
-#define __S010  PAGE_SHARED
-#define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
-#define __S101  PAGE_READONLY_EXEC
-#define __S110  PAGE_SHARED_EXEC
-#define __S111  PAGE_SHARED_EXEC
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_PROT_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c4ba047a82d2..5a73501a45ed 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1017,7 +1017,7 @@ static inline bool arch_wants_old_prefaulted_pte(void)
 }
 #define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte
 
-static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+static inline pgprot_t arm64_arch_filter_pgprot(pgprot_t prot)
 {
 	if (cpus_have_const_cap(ARM64_HAS_EPAN))
 		return prot;
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index a38f54cd638c..ad605eb86d23 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 
 #include <asm/page.h>
+#include <asm/mman.h>
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This might go
@@ -38,3 +39,52 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
 	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
 }
+
+static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
+{
+	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+	case VM_NONE:
+		return PAGE_NONE;
+	case VM_READ:
+		return PAGE_READONLY;
+	case VM_WRITE:
+		return PAGE_READONLY;
+	case VM_READ | VM_WRITE:
+		return PAGE_READONLY;
+	case VM_EXEC:
+		return PAGE_EXECONLY;
+	case VM_EXEC | VM_READ:
+		return PAGE_READONLY_EXEC;
+	case VM_EXEC | VM_WRITE:
+		return PAGE_READONLY_EXEC;
+	case VM_EXEC | VM_READ | VM_WRITE:
+		return PAGE_READONLY_EXEC;
+	case VM_SHARED:
+		return PAGE_NONE;
+	case VM_SHARED | VM_READ:
+		return PAGE_READONLY;
+	case VM_SHARED | VM_WRITE:
+		return PAGE_SHARED;
+	case VM_SHARED | VM_READ | VM_WRITE:
+		return PAGE_SHARED;
+	case VM_SHARED | VM_EXEC:
+		return PAGE_EXECONLY;
+	case VM_SHARED | VM_EXEC | VM_READ:
+		return PAGE_READONLY_EXEC;
+	case VM_SHARED | VM_EXEC | VM_WRITE:
+		return PAGE_SHARED_EXEC;
+	case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+		return PAGE_SHARED_EXEC;
+	default:
+		BUILD_BUG();
+	}
+}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	pgprot_t ret = __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
+			pgprot_val(arm64_arch_vm_get_page_prot(vm_flags)));
+
+	return arm64_arch_filter_pgprot(ret);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
-- 
2.25.1
* [RFC V1 09/31] arm/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
       [not found] <1643029028-12710-1-git-send-email-anshuman.khandual@arm.com>
  2022-01-24 12:56 ` [RFC V1 05/31] arm64/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT Anshuman Khandual
@ 2022-01-24 12:56 ` Anshuman Khandual
  2022-01-24 17:06   ` Russell King (Oracle)
  1 sibling, 1 reply; 4+ messages in thread
From: Anshuman Khandual @ 2022-01-24 12:56 UTC
  To: linux-mm
  Cc: linux-kernel, hch, akpm, Anshuman Khandual, Russell King,
	Arnd Bergmann, linux-arm-kernel

This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped which are no longer needed.

Cc: Russell King <linux@armlinux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm/Kconfig               |  1 +
 arch/arm/include/asm/pgtable.h | 18 ------------
 arch/arm/mm/mmu.c              | 50 ++++++++++++++++++++++++++++++----
 3 files changed, 45 insertions(+), 24 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fabe39169b12..c12362d20c44 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,6 +23,7 @@ config ARM
 	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
 	select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index cd1f84bb40ae..ec062dd6082a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -137,24 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  *  2) If we could do execute protection, then read is implied
  *  3) write implies read permissions
  */
-#define __P000  __PAGE_NONE
-#define __P001  __PAGE_READONLY
-#define __P010  __PAGE_COPY
-#define __P011  __PAGE_COPY
-#define __P100  __PAGE_READONLY_EXEC
-#define __P101  __PAGE_READONLY_EXEC
-#define __P110  __PAGE_COPY_EXEC
-#define __P111  __PAGE_COPY_EXEC
-
-#define __S000  __PAGE_NONE
-#define __S001  __PAGE_READONLY
-#define __S010  __PAGE_SHARED
-#define __S011  __PAGE_SHARED
-#define __S100  __PAGE_READONLY_EXEC
-#define __S101  __PAGE_READONLY_EXEC
-#define __S110  __PAGE_SHARED_EXEC
-#define __S111  __PAGE_SHARED_EXEC
-
 #ifndef __ASSEMBLY__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 274e4f73fd33..3007d07bc0e7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -403,6 +403,8 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 		local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 }
 
+static pteval_t user_pgprot;
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -410,7 +412,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -627,11 +629,6 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= PTE_EXT_PXN;
 #endif
 
-	for (i = 0; i < 16; i++) {
-		pteval_t v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | user_pgprot);
-	}
-
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
@@ -670,6 +667,47 @@ static void __init build_mem_type_table(void)
 	}
 }
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+	case VM_NONE:
+		return __pgprot(pgprot_val(__PAGE_NONE) | user_pgprot);
+	case VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY) | user_pgprot);
+	case VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY) | user_pgprot);
+	case VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY) | user_pgprot);
+	case VM_EXEC:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_EXEC | VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_EXEC | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY_EXEC) | user_pgprot);
+	case VM_EXEC | VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY_EXEC) | user_pgprot);
+	case VM_SHARED:
+		return __pgprot(pgprot_val(__PAGE_NONE) | user_pgprot);
+	case VM_SHARED | VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY) | user_pgprot);
+	case VM_SHARED | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED) | user_pgprot);
+	case VM_SHARED | VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED) | user_pgprot);
+	case VM_SHARED | VM_EXEC:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_SHARED | VM_EXEC | VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_SHARED | VM_EXEC | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED_EXEC) | user_pgprot);
+	case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED_EXEC) | user_pgprot);
+	default:
+		BUILD_BUG();
+	}
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
-- 
2.25.1
* Re: [RFC V1 09/31] arm/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
  2022-01-24 12:56 ` [RFC V1 09/31] arm/mm: " Anshuman Khandual
@ 2022-01-24 17:06   ` Russell King (Oracle)
  2022-01-25  3:36     ` Anshuman Khandual
  0 siblings, 1 reply; 4+ messages in thread
From: Russell King (Oracle) @ 2022-01-24 17:06 UTC
  To: Anshuman Khandual
  Cc: linux-mm, linux-kernel, hch, akpm, Arnd Bergmann, linux-arm-kernel

On Mon, Jan 24, 2022 at 06:26:46PM +0530, Anshuman Khandual wrote:
> This defines and exports a platform specific custom vm_get_page_prot() via
> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> macros can be dropped which are no longer needed.

What is the fundamental advantage of this approach?

-- 
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!
* Re: [RFC V1 09/31] arm/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
  2022-01-24 17:06   ` Russell King (Oracle)
@ 2022-01-25  3:36     ` Anshuman Khandual
  0 siblings, 0 replies; 4+ messages in thread
From: Anshuman Khandual @ 2022-01-25  3:36 UTC
  To: Russell King (Oracle)
  Cc: linux-mm, linux-kernel, hch, akpm, Arnd Bergmann, linux-arm-kernel

On 1/24/22 10:36 PM, Russell King (Oracle) wrote:
> On Mon, Jan 24, 2022 at 06:26:46PM +0530, Anshuman Khandual wrote:
>> This defines and exports a platform specific custom vm_get_page_prot() via
>> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
>> macros can be dropped which are no longer needed.
>
> What is the fundamental advantage of this approach?

It removes the multiple 'core MM <--> platform' abstraction layers needed to
map a vm_flags access permission combination into a page protection. From the
cover letter ..

----------
Currently there are multiple layers of abstraction i.e. the __SXXX/__PXXX
macros, protection_map[], arch_vm_get_page_prot() and arch_filter_pgprot(),
built between the platform and generic MM, finally defining vm_get_page_prot().

Hence this series proposes to drop all these abstraction levels and instead
just move the responsibility of defining vm_get_page_prot() to the platform
itself, making it clean and simple.
----------
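For reference, the generic code path that carries these layers today looks
roughly like this (a simplified, illustrative sketch of mm/mmap.c, not an
exact copy of the upstream source):

	/*
	 * The per-arch __PXXX/__SXXX macros populate protection_map[],
	 * which is indexed by the low vm_flags bits and then combined
	 * with two optional arch hooks.
	 */
	pgprot_t protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* Table lookup, then let the arch OR in extra PTE bits */
		pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
					(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
				pgprot_val(arch_vm_get_page_prot(vm_flags)));

		/* Finally let the arch filter the result (ARCH_HAS_FILTER_PGPROT) */
		return arch_filter_pgprot(ret);
	}
	EXPORT_SYMBOL(vm_get_page_prot);

With ARCH_HAS_VM_GET_PAGE_PROT the platform returns the final pgprot_t
directly, so the table and both hooks drop out of the generic code.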