From: Nicholas Piggin <npiggin@gmail.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Nicholas Piggin <npiggin@gmail.com>,
Michael Ellerman <mpe@ellerman.id.au>,
"Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>,
Florian Weimer <fweimer@redhat.com>,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH v2 5/5] powerpc/64s: mm_context.addr_limit is only used on hash
Date: Fri, 10 Nov 2017 04:27:40 +1100
Message-ID: <20171109172740.19681-6-npiggin@gmail.com>
In-Reply-To: <20171109172740.19681-1-npiggin@gmail.com>
Radix keeps no meaningful state in addr_limit, so remove it from the
radix code and rename it to slb_addr_limit to make it clear that it
applies to hash only.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
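For context (illustrative only, not part of the change; the struct and
helper names below are made up): the distinction this rename encodes is
that the hash/slice path consults a per-mm SLB address limit that is
grown on demand, while the radix path derives its high limit from the
address hint alone and never reads that field. A minimal userspace
sketch of that split, under those assumptions:

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_MAP_WINDOW_USER64	(128ULL << 40)	/* 128TB */
#define TASK_SIZE_512TB			(512ULL << 40)	/* 512TB */

struct toy_mm_context {
	unsigned long long slb_addr_limit;	/* meaningful on hash only */
};

/* Hash/slice path: a range must fit below the current SLB address limit. */
static bool hash_range_ok(const struct toy_mm_context *ctx,
			  unsigned long long addr, unsigned long long len)
{
	return len <= ctx->slb_addr_limit &&
	       addr <= ctx->slb_addr_limit - len;
}

/* Radix path: the limit depends only on the address hint, not on mm state. */
static unsigned long long radix_high_limit(unsigned long long addr_hint)
{
	return addr_hint > DEFAULT_MAP_WINDOW_USER64 ?
			TASK_SIZE_512TB : DEFAULT_MAP_WINDOW_USER64;
}

int main(void)
{
	struct toy_mm_context ctx = {
		.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64,
	};

	printf("hash: 1GB hint fits:   %d\n",
	       hash_range_ok(&ctx, 1ULL << 30, 4096));
	printf("hash: 200TB hint fits: %d\n",
	       hash_range_ok(&ctx, 200ULL << 40, 4096));
	printf("radix high limit for 200TB hint: %llu TB\n",
	       radix_high_limit(200ULL << 40) >> 40);
	return 0;
}

Because the radix paths never need to read or bump per-mm limit state,
the mmap.c and hugetlbpage-radix.c hunks below can simply delete the
addr_limit update and fold the two fixed-mapping checks together.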
arch/powerpc/include/asm/book3s/64/mmu-hash.h | 2 +-
arch/powerpc/include/asm/book3s/64/mmu.h | 2 +-
arch/powerpc/include/asm/paca.h | 2 +-
arch/powerpc/kernel/asm-offsets.c | 2 +-
arch/powerpc/kernel/paca.c | 4 ++--
arch/powerpc/kernel/setup-common.c | 3 ++-
arch/powerpc/mm/hugetlbpage-radix.c | 8 +-------
arch/powerpc/mm/mmap.c | 18 ++++--------------
arch/powerpc/mm/mmu_context_book3s64.c | 4 ++--
arch/powerpc/mm/slb_low.S | 2 +-
arch/powerpc/mm/slice.c | 22 +++++++++++-----------
11 files changed, 27 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 508275bb05d5..e91e115a816f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -606,7 +606,7 @@ extern void slb_set_size(u16 size);
/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
-#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.addr_limit >> 41)
+#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.slb_addr_limit >> 41)
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 37fdede5a24c..c9448e19847a 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -93,7 +93,7 @@ typedef struct {
#ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */
unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long addr_limit;
+ unsigned long slb_addr_limit;
#else
u16 sllp; /* SLB page size encoding */
#endif
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index c907ae23c956..3892db93b837 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -143,7 +143,7 @@ struct paca_struct {
#ifdef CONFIG_PPC_MM_SLICES
u64 mm_ctx_low_slices_psize;
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long addr_limit;
+ unsigned long mm_ctx_slb_addr_limit;
#else
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 200623e71474..9aace433491a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -185,7 +185,7 @@ int main(void)
#ifdef CONFIG_PPC_MM_SLICES
OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
- DEFINE(PACA_ADDR_LIMIT, offsetof(struct paca_struct, addr_limit));
+ OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#endif
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 5d38d5ea9a24..d6597038931d 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -262,8 +262,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
- VM_BUG_ON(!mm->context.addr_limit);
- get_paca()->addr_limit = mm->context.addr_limit;
+ VM_BUG_ON(!mm->context.slb_addr_limit);
+ get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
memcpy(&get_paca()->mm_ctx_high_slices_psize,
&context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index fa661ed616f5..2075322cd225 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -898,7 +898,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_PPC64
- init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
+ if (!radix_enabled())
+ init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
#error "context.addr_limit not initialized."
#endif
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index bd022d16745c..2486bee0f93e 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -61,16 +61,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return -EINVAL;
if (len > high_limit)
return -ENOMEM;
+
if (fixed) {
if (addr > high_limit - len)
return -ENOMEM;
- }
-
- if (unlikely(addr > mm->context.addr_limit &&
- mm->context.addr_limit != TASK_SIZE))
- mm->context.addr_limit = TASK_SIZE;
-
- if (fixed) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 6d476a7b5611..d503f344e476 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -116,17 +116,12 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (len > high_limit)
return -ENOMEM;
+
if (fixed) {
if (addr > high_limit - len)
return -ENOMEM;
- }
-
- if (unlikely(addr > mm->context.addr_limit &&
- mm->context.addr_limit != TASK_SIZE))
- mm->context.addr_limit = TASK_SIZE;
-
- if (fixed)
return addr;
+ }
if (addr) {
addr = PAGE_ALIGN(addr);
@@ -165,17 +160,12 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
if (len > high_limit)
return -ENOMEM;
+
if (fixed) {
if (addr > high_limit - len)
return -ENOMEM;
- }
-
- if (unlikely(addr > mm->context.addr_limit &&
- mm->context.addr_limit != TASK_SIZE))
- mm->context.addr_limit = TASK_SIZE;
-
- if (fixed)
return addr;
+ }
if (addr) {
addr = PAGE_ALIGN(addr);
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 846cbad45fce..5e193e444ee8 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -96,8 +96,8 @@ static int hash__init_new_context(struct mm_struct *mm)
* In the case of exec, use the default limit,
* otherwise inherit it from the mm we are duplicating.
*/
- if (!mm->context.addr_limit)
- mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
+ if (!mm->context.slb_addr_limit)
+ mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
/*
* The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 906a86fe457b..7046bb389704 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -167,7 +167,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
/*
* user space make sure we are within the allowed limit
*/
- ld r11,PACA_ADDR_LIMIT(r13)
+ ld r11,PACA_SLB_ADDR_LIMIT(r13)
cmpld r3,r11
bge- 8f
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index a4f93699194b..564fff06f5c1 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
{
struct vm_area_struct *vma;
- if ((mm->context.addr_limit - len) < addr)
+ if ((mm->context.slb_addr_limit - len) < addr)
return 0;
vma = find_vma(mm, addr);
return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,10 +133,10 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;
- if (mm->context.addr_limit <= SLICE_LOW_TOP)
+ if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
return;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
if (!slice_high_has_vma(mm, i))
__set_bit(i, ret->high_slices);
}
@@ -157,7 +157,7 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
ret->low_slices |= 1u << i;
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,7 +169,7 @@ static int slice_check_fit(struct mm_struct *mm,
struct slice_mask mask, struct slice_mask available)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
- unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
+ unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
bitmap_and(result, mask.high_slices,
available.high_slices, slice_count);
@@ -219,7 +219,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
mm->context.low_slices_psize = lpsizes;
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (test_bit(i, mask.high_slices))
@@ -329,8 +329,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* Only for that request for which high_limit is above
* DEFAULT_MAP_WINDOW we should apply this.
*/
- if (high_limit > DEFAULT_MAP_WINDOW)
- addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
+ if (high_limit > DEFAULT_MAP_WINDOW)
+ addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
while (addr > PAGE_SIZE) {
info.high_limit = addr;
@@ -432,8 +432,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
return -ENOMEM;
}
- if (high_limit > mm->context.addr_limit) {
- mm->context.addr_limit = high_limit;
+ if (high_limit > mm->context.slb_addr_limit) {
+ mm->context.slb_addr_limit = high_limit;
on_each_cpu(slice_flush_segments, mm, 1);
}
@@ -452,7 +452,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Sanity checks */
BUG_ON(mm->task_size == 0);
- BUG_ON(mm->context.addr_limit == 0);
+ BUG_ON(mm->context.slb_addr_limit == 0);
VM_BUG_ON(radix_enabled());
slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
--
2.15.0