* [PATCH 1/2] powerpc/64s/radix: Enable HAVE_ARCH_HUGE_VMAP
@ 2019-06-07 6:19 Nicholas Piggin
2019-06-07 6:19 ` [PATCH 2/2] powerpc/64s/radix: ioremap use huge page mappings Nicholas Piggin
0 siblings, 1 reply; 4+ messages in thread
From: Nicholas Piggin @ 2019-06-07 6:19 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Nicholas Piggin
This sets the HAVE_ARCH_HUGE_VMAP option, and defines the required
page table functions.
This will not enable huge iomaps, because powerpc/64 ioremap does not
call ioremap_page_range. That is done in a later change.
HAVE_ARCH_HUGE_VMAP facilities will be used to enable huge pages for
vmalloc memory in a set of generic kernel changes. Combined, this
improves cached `git diff` performance by about 5% on a 2-node POWER9
with 32MB dentry cache hash, by allowing the dentry/inode hashes to
be mapped with 2MB pages:
Profiling git diff dTLB misses with a vanilla kernel:
81.75% git [kernel.vmlinux] [k] __d_lookup_rcu
7.21% git [kernel.vmlinux] [k] strncpy_from_user
1.77% git [kernel.vmlinux] [k] find_get_entry
1.59% git [kernel.vmlinux] [k] kmem_cache_free
40,168 dTLB-miss
0.100342754 seconds time elapsed
With powerpc huge vmap and generic huge vmap vmalloc:
2,987 dTLB-miss
0.095933138 seconds time elapsed
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/mm/book3s64/radix_pgtable.c | 93 ++++++++++++++++++++++++
2 files changed, 94 insertions(+)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8c1c636308c8..f0e5b38d52e8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -167,6 +167,7 @@ config PPC
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if PPC32
select HAVE_ARCH_KGDB
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index c9bcf428dd2b..3bc9ade56277 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1122,3 +1122,96 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
set_pte_at(mm, addr, ptep, pte);
}
+
+int __init arch_ioremap_pud_supported(void)
+{
+ return radix_enabled();
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+ return radix_enabled();
+}
+
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pud;
+ pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
+
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_huge(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ pmd_t *pmd;
+ int i;
+
+ pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pud_clear(pud);
+
+ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd[i])) {
+ pte_t *pte;
+ pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+
+ pte_free_kernel(&init_mm, pte);
+ }
+ }
+
+ pmd_free(&init_mm, pmd);
+
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pmd;
+ pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
+
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (pmd_huge(*pmd)) {
+ pmd_clear(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pmd_clear(pmd);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+
+ pte_free_kernel(&init_mm, pte);
+
+ return 1;
+}
--
2.20.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH 2/2] powerpc/64s/radix: ioremap use huge page mappings
2019-06-07 6:19 [PATCH 1/2] powerpc/64s/radix: Enable HAVE_ARCH_HUGE_VMAP Nicholas Piggin
@ 2019-06-07 6:19 ` Nicholas Piggin
2019-06-07 6:56 ` Christophe Leroy
0 siblings, 1 reply; 4+ messages in thread
From: Nicholas Piggin @ 2019-06-07 6:19 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Nicholas Piggin
powerpc/64s does not use ioremap_page_range, so it does not get huge
vmap iomap mappings automatically. The radix kernel mapping function
already allows larger page mappings that work with huge vmap, so wire
that up to allow huge pages to be used for ioremap mappings.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/include/asm/book3s/64/pgtable.h | 8 +++
arch/powerpc/mm/pgtable_64.c | 58 ++++++++++++++++++--
include/linux/io.h | 1 +
lib/ioremap.c | 2 +-
4 files changed, 62 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ccf00a8b98c6..d7a4f2d80598 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -274,6 +274,14 @@ extern unsigned long __vmalloc_end;
#define VMALLOC_START __vmalloc_start
#define VMALLOC_END __vmalloc_end
+static inline unsigned int ioremap_max_order(void)
+{
+ if (radix_enabled())
+ return PUD_SHIFT;
+ return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
+}
+#define IOREMAP_MAX_ORDER ({ ioremap_max_order();})
+
extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_virt_size;
extern unsigned long __kernel_io_start;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d2d976ff8a0e..cf02b67eee55 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -112,7 +112,7 @@ unsigned long ioremap_bot = IOREMAP_BASE;
* __ioremap_at - Low level function to establish the page tables
* for an IO mapping
*/
-void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+static void __iomem * hash__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
unsigned long i;
@@ -120,6 +120,54 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
if (pgprot_val(prot) & H_PAGE_4K_PFN)
return NULL;
+ for (i = 0; i < size; i += PAGE_SIZE)
+ if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
+ return NULL;
+
+ return (void __iomem *)ea;
+}
+
+static int radix__ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ while (addr != end) {
+ if (unlikely(ioremap_huge_disabled))
+ goto use_small_page;
+
+ if (!(addr & ~PUD_MASK) && !(phys_addr & ~PUD_MASK) &&
+ end - addr >= PUD_SIZE) {
+ if (radix__map_kernel_page(addr, phys_addr, prot, PUD_SIZE))
+ return -ENOMEM;
+ addr += PUD_SIZE;
+ phys_addr += PUD_SIZE;
+
+ } else if (!(addr & ~PMD_MASK) && !(phys_addr & ~PMD_MASK) &&
+ end - addr >= PMD_SIZE) {
+ if (radix__map_kernel_page(addr, phys_addr, prot, PMD_SIZE))
+ return -ENOMEM;
+ addr += PMD_SIZE;
+ phys_addr += PMD_SIZE;
+
+ } else {
+use_small_page:
+ if (radix__map_kernel_page(addr, phys_addr, prot, PAGE_SIZE))
+ return -ENOMEM;
+ addr += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ }
+ }
+ return 0;
+}
+
+static void __iomem * radix__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+{
+ if (radix__ioremap_page_range((unsigned long)ea, (unsigned long)ea + size, pa, prot))
+ return NULL;
+ return ea;
+}
+
+void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+{
if ((ea + size) >= (void *)IOREMAP_END) {
pr_warn("Outside the supported range\n");
return NULL;
@@ -129,11 +177,9 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
WARN_ON(size & ~PAGE_MASK);
- for (i = 0; i < size; i += PAGE_SIZE)
- if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
- return NULL;
-
- return (void __iomem *)ea;
+ if (radix_enabled())
+ return radix__ioremap_at(pa, ea, size, prot);
+ return hash__ioremap_at(pa, ea, size, prot);
}
/**
diff --git a/include/linux/io.h b/include/linux/io.h
index 32e30e8fb9db..423c4294aaa3 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -44,6 +44,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
#endif
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+extern int ioremap_huge_disabled;
void __init ioremap_huge_init(void);
int arch_ioremap_pud_supported(void);
int arch_ioremap_pmd_supported(void);
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 063213685563..386ff956755f 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -18,7 +18,7 @@
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
+int __read_mostly ioremap_huge_disabled;
static int __init set_nohugeiomap(char *str)
{
--
2.20.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH 2/2] powerpc/64s/radix: ioremap use huge page mappings
2019-06-07 6:19 ` [PATCH 2/2] powerpc/64s/radix: ioremap use huge page mappings Nicholas Piggin
@ 2019-06-07 6:56 ` Christophe Leroy
2019-06-07 7:24 ` Nicholas Piggin
0 siblings, 1 reply; 4+ messages in thread
From: Christophe Leroy @ 2019-06-07 6:56 UTC (permalink / raw)
To: Nicholas Piggin, linuxppc-dev
Le 07/06/2019 à 08:19, Nicholas Piggin a écrit :
> powerpc/64s does not use ioremap_page_range, so it does not get huge
> vmap iomap mappings automatically. The radix kernel mapping function
> already allows larger page mappings that work with huge vmap, so wire
> that up to allow huge pages to be used for ioremap mappings.
Argh ... I was on the way to merge pgtable_64.c and pgtable_32.c. This
will complicate the task ... Anyway this looks a good improvement.
Any reason to limit that to Radix ?
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
> arch/powerpc/include/asm/book3s/64/pgtable.h | 8 +++
> arch/powerpc/mm/pgtable_64.c | 58 ++++++++++++++++++--
> include/linux/io.h | 1 +
> lib/ioremap.c | 2 +-
> 4 files changed, 62 insertions(+), 7 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index ccf00a8b98c6..d7a4f2d80598 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -274,6 +274,14 @@ extern unsigned long __vmalloc_end;
> #define VMALLOC_START __vmalloc_start
> #define VMALLOC_END __vmalloc_end
>
> +static inline unsigned int ioremap_max_order(void)
> +{
> + if (radix_enabled())
> + return PUD_SHIFT;
> + return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
> +}
> +#define IOREMAP_MAX_ORDER ({ ioremap_max_order();})
Following form doesn't work ?
#define IOREMAP_MAX_ORDER ioremap_max_order()
> +
> extern unsigned long __kernel_virt_start;
> extern unsigned long __kernel_virt_size;
> extern unsigned long __kernel_io_start;
> diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
> index d2d976ff8a0e..cf02b67eee55 100644
> --- a/arch/powerpc/mm/pgtable_64.c
> +++ b/arch/powerpc/mm/pgtable_64.c
> @@ -112,7 +112,7 @@ unsigned long ioremap_bot = IOREMAP_BASE;
> * __ioremap_at - Low level function to establish the page tables
> * for an IO mapping
> */
> -void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
> +static void __iomem * hash__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
Is this the correct name ?
As far as I understand, this function will be used by nohash/64, looks
strange to call hash__something() a function used by nohash platforms.
> {
> unsigned long i;
>
> @@ -120,6 +120,54 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
> if (pgprot_val(prot) & H_PAGE_4K_PFN)
> return NULL;
>
> + for (i = 0; i < size; i += PAGE_SIZE)
> + if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
> + return NULL;
> +
> + return (void __iomem *)ea;
> +}
> +
> +static int radix__ioremap_page_range(unsigned long addr, unsigned long end,
> + phys_addr_t phys_addr, pgprot_t prot)
> +{
> + while (addr != end) {
> + if (unlikely(ioremap_huge_disabled))
> + goto use_small_page;
I don't like too much a goto in the middle of an if/else set inside a loop.
Couldn't we have two while() loops, one for the !ioremap_huge_disabled()
and one for the ioremap_huge_disabled() case ? It would duplicate some
code but that's only 3 small lines.
Or, when ioremap_huge_disabled(), couldn't it just fallback to the
hash__ioremap_at() function ?
> +
> + if (!(addr & ~PUD_MASK) && !(phys_addr & ~PUD_MASK) &&
> + end - addr >= PUD_SIZE) {
> + if (radix__map_kernel_page(addr, phys_addr, prot, PUD_SIZE))
> + return -ENOMEM;
> + addr += PUD_SIZE;
> + phys_addr += PUD_SIZE;
> +
> + } else if (!(addr & ~PMD_MASK) && !(phys_addr & ~PMD_MASK) &&
> + end - addr >= PMD_SIZE) {
> + if (radix__map_kernel_page(addr, phys_addr, prot, PMD_SIZE))
> + return -ENOMEM;
> + addr += PMD_SIZE;
> + phys_addr += PMD_SIZE;
> +
> + } else {
> +use_small_page:
> + if (radix__map_kernel_page(addr, phys_addr, prot, PAGE_SIZE))
> + return -ENOMEM;
> + addr += PAGE_SIZE;
> + phys_addr += PAGE_SIZE;
> + }
> + }
> + return 0;
> +}
> +
> +static void __iomem * radix__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
> +{
> + if (radix__ioremap_page_range((unsigned long)ea, (unsigned long)ea + size, pa, prot))
> + return NULL;
> + return ea;
> +}
> +
> +void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
> +{
> if ((ea + size) >= (void *)IOREMAP_END) {
> pr_warn("Outside the supported range\n");
> return NULL;
> @@ -129,11 +177,9 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
> WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
> WARN_ON(size & ~PAGE_MASK);
>
> - for (i = 0; i < size; i += PAGE_SIZE)
> - if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
> - return NULL;
> -
> - return (void __iomem *)ea;
> + if (radix_enabled())
What about if (radix_enabled() && !ioremap_huge_disabled()) instead ?
> + return radix__ioremap_at(pa, ea, size, prot);
> + return hash__ioremap_at(pa, ea, size, prot);
Can't we just leave the no radix stuff here instead of making that
hash__ioremap_at() function ?
Christophe
> }
>
> /**
> diff --git a/include/linux/io.h b/include/linux/io.h
> index 32e30e8fb9db..423c4294aaa3 100644
> --- a/include/linux/io.h
> +++ b/include/linux/io.h
> @@ -44,6 +44,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
> #endif
>
> #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
> +extern int ioremap_huge_disabled;
> void __init ioremap_huge_init(void);
> int arch_ioremap_pud_supported(void);
> int arch_ioremap_pmd_supported(void);
> diff --git a/lib/ioremap.c b/lib/ioremap.c
> index 063213685563..386ff956755f 100644
> --- a/lib/ioremap.c
> +++ b/lib/ioremap.c
> @@ -18,7 +18,7 @@
> static int __read_mostly ioremap_p4d_capable;
> static int __read_mostly ioremap_pud_capable;
> static int __read_mostly ioremap_pmd_capable;
> -static int __read_mostly ioremap_huge_disabled;
> +int __read_mostly ioremap_huge_disabled;
>
> static int __init set_nohugeiomap(char *str)
> {
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 2/2] powerpc/64s/radix: ioremap use huge page mappings
2019-06-07 6:56 ` Christophe Leroy
@ 2019-06-07 7:24 ` Nicholas Piggin
0 siblings, 0 replies; 4+ messages in thread
From: Nicholas Piggin @ 2019-06-07 7:24 UTC (permalink / raw)
To: Christophe Leroy, linuxppc-dev
Christophe Leroy's on June 7, 2019 4:56 pm:
>
>
> Le 07/06/2019 à 08:19, Nicholas Piggin a écrit :
>> powerpc/64s does not use ioremap_page_range, so it does not get huge
>> vmap iomap mappings automatically. The radix kernel mapping function
>> already allows larger page mappings that work with huge vmap, so wire
>> that up to allow huge pages to be used for ioremap mappings.
>
> Argh ... I was on the way to merge pgtable_64.c and pgtable_32.c. This
> will complicate the task ... Anyway this looks a good improvement.
I can put the radix code into book3s64, at least then you have less
gunk in the common code.
> Any reason to limit that to Radix ?
Just that the others don't have a page size easily exposed for their
map_kernel_page.
It would be nice to make this a bit more generic so other sub archs
can enable the larger mappings just by implementing map_kernel_page.
>
>>
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> ---
>> arch/powerpc/include/asm/book3s/64/pgtable.h | 8 +++
>> arch/powerpc/mm/pgtable_64.c | 58 ++++++++++++++++++--
>> include/linux/io.h | 1 +
>> lib/ioremap.c | 2 +-
>> 4 files changed, 62 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> index ccf00a8b98c6..d7a4f2d80598 100644
>> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
>> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> @@ -274,6 +274,14 @@ extern unsigned long __vmalloc_end;
>> #define VMALLOC_START __vmalloc_start
>> #define VMALLOC_END __vmalloc_end
>>
>> +static inline unsigned int ioremap_max_order(void)
>> +{
>> + if (radix_enabled())
>> + return PUD_SHIFT;
>> + return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
>> +}
>> +#define IOREMAP_MAX_ORDER ({ ioremap_max_order();})
>
> Following form doesn't work ?
>
> #define IOREMAP_MAX_ORDER ioremap_max_order()
I suppose it would. I'm not sure why I did that.
>> +
>> extern unsigned long __kernel_virt_start;
>> extern unsigned long __kernel_virt_size;
>> extern unsigned long __kernel_io_start;
>> diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
>> index d2d976ff8a0e..cf02b67eee55 100644
>> --- a/arch/powerpc/mm/pgtable_64.c
>> +++ b/arch/powerpc/mm/pgtable_64.c
>> @@ -112,7 +112,7 @@ unsigned long ioremap_bot = IOREMAP_BASE;
>> * __ioremap_at - Low level function to establish the page tables
>> * for an IO mapping
>> */
>> -void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
>> +static void __iomem * hash__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
>
> Is this the correct name ?
>
> As far as I understand, this function will be used by nohash/64, looks
> strange to call hash__something() a function used by nohash platforms.
Yeah you're right, I'll fix that.
>> {
>> unsigned long i;
>>
>> @@ -120,6 +120,54 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
>> if (pgprot_val(prot) & H_PAGE_4K_PFN)
>> return NULL;
>>
>> + for (i = 0; i < size; i += PAGE_SIZE)
>> + if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
>> + return NULL;
>> +
>> + return (void __iomem *)ea;
>> +}
>> +
>> +static int radix__ioremap_page_range(unsigned long addr, unsigned long end,
>> + phys_addr_t phys_addr, pgprot_t prot)
>> +{
>> + while (addr != end) {
>> + if (unlikely(ioremap_huge_disabled))
>> + goto use_small_page;
>
> I don't like too much a goto in the middle of an if/else set inside a loop.
>
> Couldn't we have two while() loops, one for the !ioremap_huge_disabled()
> and one for the ioremap_huge_disabled() case ? It would duplicate some
> code but that's only 3 small lines.
>
> Or, when ioremap_huge_disabled(), couldn't it just fallback to the
> hash__ioremap_at() function ?
Yeah okay. I'll see how the code looks after I move it into
radix_pgtable.c, it might be best to keep all radix code there together
even for the disabled case.
>> + return radix__ioremap_at(pa, ea, size, prot);
>> + return hash__ioremap_at(pa, ea, size, prot);
>
> Can't we just leave the no radix stuff here instead of making that
> hash__ioremap_at() function ?
Yeah that'll probably work better.
Thanks,
Nick
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2019-06-07 7:27 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2019-06-07 6:19 [PATCH 1/2] powerpc/64s/radix: Enable HAVE_ARCH_HUGE_VMAP Nicholas Piggin
2019-06-07 6:19 ` [PATCH 2/2] powerpc/64s/radix: ioremap use huge page mappings Nicholas Piggin
2019-06-07 6:56 ` Christophe Leroy
2019-06-07 7:24 ` Nicholas Piggin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).