From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
To: benh@kernel.crashing.org, paulus@samba.org, mpe@ellerman.id.au
Cc: linuxppc-dev@lists.ozlabs.org,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Subject: [RFC PATCH 4/5] powerpc/mm: Add support for handling > 512TB address in SLB miss
Date: Sat, 10 Feb 2018 16:26:42 +0530
Message-ID: <20180210105643.11857-5-aneesh.kumar@linux.vnet.ibm.com>
In-Reply-To: <20180210105643.11857-1-aneesh.kumar@linux.vnet.ibm.com>

For addresses above 512TB we allocate additional mmu contexts. To keep it
simple, addresses above 512TB are handled with IR/DR=1 and with a stack frame
set up.

We do the additional context allocation in the SLB miss handler. If the
context is not yet allocated, we enable interrupts, allocate the context and
retry the access, which will again result in an SLB miss.
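
To illustrate the layout (this is only a sketch using the helpers and
constants added by this patch; the wrapper name below is hypothetical and
mirrors get_esid_context()):

	/*
	 * With 64K pages H_BITS_FIRST_CONTEXT is 49, so each context
	 * covers a 512TB range: EA bits 49 and above select the slot.
	 */
	static inline int ea_to_context(mm_context_t *ctx, unsigned long ea)
	{
		int index = ea >> H_BITS_FIRST_CONTEXT;

		if (index == 0)		/* first 512TB: default context */
			return ctx->id;
		return ctx->extended_id[index - 1];
	}

If the selected slot is still unallocated (below MIN_USER_CONTEXT) when an
SLB miss hits that range, the handler enables interrupts, allocates a new
context id into it and returns; the retried access takes another SLB miss
and then finds a valid context.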

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hash-4k.h  |   6 ++
 arch/powerpc/include/asm/book3s/64/hash-64k.h |   5 +
 arch/powerpc/include/asm/book3s/64/mmu-hash.h |   6 +-
 arch/powerpc/include/asm/book3s/64/mmu.h      |  24 +++++
 arch/powerpc/kernel/exceptions-64s.S          |  12 ++-
 arch/powerpc/mm/copro_fault.c                 |   2 +-
 arch/powerpc/mm/hash_utils_64.c               |   4 +-
 arch/powerpc/mm/mmu_context_book3s64.c        |  17 ++-
 arch/powerpc/mm/pgtable-hash64.c              |   2 +-
 arch/powerpc/mm/slb.c                         | 144 ++++++++++++++++++++++++++
 arch/powerpc/mm/slb_low.S                     |   6 +-
 arch/powerpc/mm/tlb_hash64.c                  |   2 +-
 12 files changed, 215 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 8a91f2475ea3..fb1a75df23e3 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -11,6 +11,12 @@
 #define H_PUD_INDEX_SIZE  9
 #define H_PGD_INDEX_SIZE  9
 
+/*
+ * Number of address bits below which we use the default context
+ * for SLB allocation. With a 4K page size this is 64TB (2^46).
+ */
+#define H_BITS_FIRST_CONTEXT	46
+
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
 #define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << H_PMD_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 1dace73e1d12..d21ff0372f19 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -6,6 +6,11 @@
 #define H_PMD_INDEX_SIZE  10
 #define H_PUD_INDEX_SIZE  7
 #define H_PGD_INDEX_SIZE  8
+/*
+ * Number of address bits below which we use the default context
+ * for SLB allocation. With a 64K page size this is 512TB (2^49).
+ */
+#define H_BITS_FIRST_CONTEXT	49
 
 /*
  * 64k aligned address free up few of the lower bits of RPN for us
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 50ed64fba4ae..8ee83f6e9c84 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -691,8 +691,8 @@ static inline int user_segment_size(unsigned long addr)
 	return MMU_SEGSIZE_256M;
 }
 
-static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
-				     int ssize)
+static inline unsigned long __get_vsid(unsigned long context, unsigned long ea,
+				       int ssize)
 {
 	unsigned long va_bits = VA_BITS;
 	unsigned long vsid_bits;
@@ -744,7 +744,7 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 	 */
 	context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;
 
-	return get_vsid(context, ea, ssize);
+	return __get_vsid(context, ea, ssize);
 }
 
 unsigned htab_shift_for_mem_size(unsigned long mem_size);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 0abeb0e2d616..ebd2f62050ee 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -82,6 +82,12 @@ struct spinlock;
 
 typedef struct {
 	mm_context_id_t id;
+	/*
+	 * One context for each 512TB range above the first 512TB.
+	 * The context for the first 512TB is stored in id and is
+	 * also used as the PIDR.
+	 */
+	mm_context_id_t extended_id[(TASK_SIZE_USER64/TASK_SIZE_512TB) - 1];
 	u16 user_psize;		/* page size index */
 
 	/* Number of bits in the mm_cpumask */
@@ -174,5 +180,23 @@ extern void radix_init_pseries(void);
 static inline void radix_init_pseries(void) { };
 #endif
 
+static inline int get_esid_context(mm_context_t *ctx, unsigned long ea)
+{
+	int index = ea >> H_BITS_FIRST_CONTEXT;
+
+	if (index == 0)
+		return ctx->id;
+	return ctx->extended_id[--index];
+}
+
+static inline unsigned long get_user_vsid(mm_context_t *ctx,
+					  unsigned long ea, int ssize)
+{
+	unsigned long context = get_esid_context(ctx, ea);
+
+	return __get_vsid(context, ea, ssize);
+}
+
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9e6882bdc526..d8188d52327c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -620,8 +620,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 	ld	r10,PACA_EXSLB+EX_LR(r13)
 	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
 	mtlr	r10
+	/*
+	 * Large address, check whether we have to allocate new
+	 * contexts.
+	 */
+	beq-	8f
 
-	beq-	8f		/* if bad address, make full stack frame */
 
 	bne-	cr5,2f		/* if unrecoverable exception, oops */
 
@@ -685,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 	mr	r3,r12
 	mfspr	r11,SPRN_SRR0
 	mfspr	r12,SPRN_SRR1
-	LOAD_HANDLER(r10,bad_addr_slb)
+	LOAD_HANDLER(r10, multi_context_slb)
 	mtspr	SPRN_SRR0,r10
 	ld	r10,PACAKMSR(r13)
 	mtspr	SPRN_SRR1,r10
@@ -700,7 +704,7 @@ EXC_COMMON_BEGIN(unrecov_slb)
 	bl	unrecoverable_exception
 	b	1b
 
-EXC_COMMON_BEGIN(bad_addr_slb)
+EXC_COMMON_BEGIN(multi_context_slb)
 	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
 	RECONCILE_IRQ_STATE(r10, r11)
 	ld	r3, PACA_EXSLB+EX_DAR(r13)
@@ -710,7 +714,7 @@ EXC_COMMON_BEGIN(bad_addr_slb)
 	std	r10, _TRAP(r1)
 2:	bl	save_nvgprs
 	addi	r3, r1, STACK_FRAME_OVERHEAD
-	bl	slb_miss_bad_addr
+	bl	handle_multi_context_slb_miss
 	b	ret_from_except
 
 EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 697b70ad1195..7d0945bd3a61 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -112,7 +112,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 			return 1;
 		psize = get_slice_psize(mm, ea);
 		ssize = user_segment_size(ea);
-		vsid = get_vsid(mm->context.id, ea, ssize);
+		vsid = get_user_vsid(&mm->context, ea, ssize);
 		vsidkey = SLB_VSID_USER;
 		break;
 	case VMALLOC_REGION_ID:
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index cf290d415dcd..d6fdf20cdd7b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1262,7 +1262,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		}
 		psize = get_slice_psize(mm, ea);
 		ssize = user_segment_size(ea);
-		vsid = get_vsid(mm->context.id, ea, ssize);
+		vsid = get_user_vsid(&mm->context, ea, ssize);
 		break;
 	case VMALLOC_REGION_ID:
 		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
@@ -1527,7 +1527,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 
 	/* Get VSID */
 	ssize = user_segment_size(ea);
-	vsid = get_vsid(mm->context.id, ea, ssize);
+	vsid = get_user_vsid(&mm->context, ea, ssize);
 	if (!vsid)
 		return;
 	/*
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 929d9ef7083f..fbcba7337354 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -185,6 +185,19 @@ void __destroy_context(int context_id)
 }
 EXPORT_SYMBOL_GPL(__destroy_context);
 
+void destroy_extended_context(mm_context_t *ctx)
+{
+	int index, context_id;
+
+	spin_lock(&mmu_context_lock);
+	for (index = 1; index < (TASK_SIZE_USER64/TASK_SIZE_512TB); index++) {
+		context_id = ctx->extended_id[index - 1];
+		if (context_id != 0)
+			ida_remove(&mmu_context_ida, context_id);
+	}
+	spin_unlock(&mmu_context_lock);
+}
+
 #ifdef CONFIG_PPC_64K_PAGES
 static void destroy_pagetable_page(struct mm_struct *mm)
 {
@@ -220,8 +233,10 @@ void destroy_context(struct mm_struct *mm)
 #endif
 	if (radix_enabled())
 		WARN_ON(process_tb[mm->context.id].prtb0 != 0);
-	else
+	else {
 		subpage_prot_free(mm);
+		destroy_extended_context(&mm->context);
+	}
 	destroy_pagetable_page(mm);
 	__destroy_context(mm->context.id);
 	mm->context.id = MMU_NO_CONTEXT;
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index ec277913e01b..2e01794709ad 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -342,7 +342,7 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
-		vsid = get_vsid(mm->context.id, addr, ssize);
+		vsid = get_user_vsid(&mm->context, addr, ssize);
 		WARN_ON(vsid == 0);
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 13cfe413b40d..31e7ad0608f0 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -23,6 +23,7 @@
 #include <asm/smp.h>
 #include <linux/compiler.h>
 #include <linux/mm_types.h>
+#include <linux/context_tracking.h>
 
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
@@ -340,3 +341,146 @@ void slb_initialize(void)
 
 	asm volatile("isync":::"memory");
 }
+
+/*
+ * Only handle insert of 1TB slb entries.
+ */
+static void insert_slb_entry(unsigned long vsid, unsigned long ea,
+			     int bpsize, int ssize)
+{
+	int slb_cache_index;
+	unsigned long flags;
+	enum slb_index index;
+	unsigned long vsid_data, esid_data;
+
+	/*
+	 * We are in an irq-disabled context, hence it is
+	 * safe to access the PACA.
+	 */
+	index = get_paca()->stab_rr;
+	/*
+	 * Simple round-robin replacement of SLB entries.
+	 */
+	if (index < (mmu_slb_size - 1))
+		index++;
+	else
+		index = SLB_NUM_BOLTED;
+	get_paca()->stab_rr = index;
+
+	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
+	vsid_data = (vsid << SLB_VSID_SHIFT_1T) | flags |
+		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
+	esid_data = mk_esid_data(ea, mmu_highuser_ssize, index);
+
+	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
+		     : "memory");
+	/*
+	 * Now update slb cache entries
+	 */
+	slb_cache_index = get_paca()->slb_cache_ptr;
+	if (slb_cache_index < SLB_CACHE_ENTRIES) {
+		/*
+		 * We have space in slb cache for optimized switch_slb().
+		 * Top 36 bits from esid_data as per ISA
+		 */
+		get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
+	}
+	/*
+	 * If the cache is full, just increment the pointer and return.
+	 */
+	get_paca()->slb_cache_ptr++;
+	return;
+}
+
+static void alloc_extended_context(struct mm_struct *mm, unsigned long ea)
+{
+	int context_id;
+
+	int index = (ea >> H_BITS_FIRST_CONTEXT) - 1;
+
+	/*
+	 * FIXME!! Should this use a lock other than mmap_sem?
+	 */
+	down_read(&mm->mmap_sem);
+	context_id = hash__alloc_context_id();
+	if (context_id < 0) {
+		up_read(&mm->mmap_sem);
+		pagefault_out_of_memory();
+		return;
+	}
+	/* Check for parallel allocation after holding lock */
+	if (mm->context.extended_id[index] < MIN_USER_CONTEXT)
+		mm->context.extended_id[index] = context_id;
+	else
+		__destroy_context(context_id);
+	up_read(&mm->mmap_sem);
+	return;
+}
+
+static void __handle_multi_context_slb_miss(struct pt_regs *regs,
+					    unsigned long ea)
+{
+	int context, bpsize;
+	unsigned long vsid;
+	struct mm_struct *mm = current->mm;
+
+	context = get_esid_context(&mm->context, ea);
+	if (context < MIN_USER_CONTEXT) {
+		/*
+		 * We haven't allocated a context for this range yet.
+		 * Enable interrupts, allocate the context and return.
+		 * The retried access will take another SLB miss and
+		 * come back here with the context allocated.
+		 */
+		/* We restore the interrupt state now */
+		if (!arch_irq_disabled_regs(regs))
+			local_irq_enable();
+		return alloc_extended_context(mm, ea);
+	}
+	/*
+	 * We are always above 1TB, hence use high user segment size.
+	 */
+	vsid = __get_vsid(context, ea, mmu_highuser_ssize);
+	bpsize = get_slice_psize(mm, ea);
+
+	insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
+	return;
+}
+
+/*
+ * FIXME!! Is the exception_enter()/exception_exit() handling here sufficient?
+ */
+void handle_multi_context_slb_miss(struct pt_regs *regs)
+{
+	enum ctx_state prev_state = exception_enter();
+	unsigned long ea = regs->dar;
+
+	/*
+	 * The kernel always runs with a single context. Hence
+	 * any kernel-mode access that needs an extra context is
+	 * considered a bad SLB request.
+	 */
+	if (!user_mode(regs))
+		return bad_page_fault(regs, ea, SIGSEGV);
+
+	if (REGION_ID(ea) != USER_REGION_ID)
+		goto slb_bad_addr;
+	/*
+	 * Are we beyond what the page table layout supports?
+	 */
+	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
+		goto slb_bad_addr;
+
+	/* Lower addresses should have been handled by the asm fast path */
+	if (ea < (1UL << H_BITS_FIRST_CONTEXT))
+		goto slb_bad_addr;
+
+	__handle_multi_context_slb_miss(regs, ea);
+	exception_exit(prev_state);
+	return;
+
+slb_bad_addr:
+	_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
+	exception_exit(prev_state);
+	return;
+}
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 2cf5ef3fc50d..425b4c5ec1e9 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -75,10 +75,12 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
  */
 _GLOBAL(slb_allocate)
 	/*
-	 * check for bad kernel/user address
+	 * Check for an address range that needs multi-context handling. For
+	 * the default context we allocate the SLB entry via the asm fast path.
+	 * For large addresses we branch to C code to use the extra contexts.
 	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
 	 */
-	rldicr. r9,r3,4,(63 - H_PGTABLE_EADDR_SIZE - 4)
+	rldicr. r9,r3,4,(63 - H_BITS_FIRST_CONTEXT - 4)
 	bne-	8f
 
 	srdi	r9,r3,60		/* get region */
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 881ebd53ffc2..08a69985cfca 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -84,7 +84,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
-		vsid = get_vsid(mm->context.id, addr, ssize);
+		vsid = get_user_vsid(&mm->context, addr, ssize);
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
-- 
2.14.3


Thread overview: 6+ messages
2018-02-10 10:56 [RFC PATCH 0/5] Add support for 4PB virtual address space on hash Aneesh Kumar K.V
2018-02-10 10:56 ` [RFC PATCH 1/5] powerpc: Don't do runtime futex_cmpxchg test Aneesh Kumar K.V
2018-02-10 10:56 ` [RFC PATCH 2/5] powerpc/mm/slice: Update documentation in the file Aneesh Kumar K.V
2018-02-10 10:56 ` [RFC PATCH 3/5] powerpc/mm/slice: Reduce the stack usage in slice_get_unmapped_area Aneesh Kumar K.V
2018-02-10 10:56 ` Aneesh Kumar K.V [this message]
2018-02-10 10:56 ` [RFC PATCH 5/5] powerpc/mm/hash64: Increase the VA range Aneesh Kumar K.V
