* [RFC PATCH 6/7] powerpc/64s/radix: reset mm_cpumask for single thread process when possible
From: Nicholas Piggin @ 2017-10-31  7:18 UTC
  To: linuxppc-dev, Aneesh Kumar K . V, Nicholas Piggin

When a single-threaded process has a non-local mm_cpumask and requires
a full PID tlbie invalidation, use that as an opportunity to reset the
cpumask back to the current CPU we're running on.

There is a lot of tuning we can do with this, and more sophisticated
management of PIDs and stale translations across CPUs is possible, but
this is a simple change that significantly helps single-threaded
processes without changing behaviour too much.
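
The reset itself is the same short sequence at each flush site below;
factored out, it would look roughly like this (a sketch only: the
helper is hypothetical, the patch open-codes the sequence at each
site):

	/*
	 * After a full-PID tlbie, no other CPU can hold translations
	 * for this mm, so a single-user mm running here can shrink
	 * its cpumask back to just the current CPU.
	 */
	static void reset_mm_cpumask_to_local(struct mm_struct *mm)
	{
		if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
			atomic_set(&mm->context.active_cpus, 1);
			cpumask_clear(mm_cpumask(mm));
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		}
	}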

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/mmu_context.h | 19 +++++++++++++
 arch/powerpc/mm/tlb-radix.c            | 52 +++++++++++++++++++++++++++-------
 2 files changed, 60 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 20eae6f76247..05516027fd82 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/spinlock.h>
 #include <asm/mmu.h>
 #include <asm/cputable.h>
@@ -153,6 +154,24 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 				  struct task_struct *tsk)
 {
+#ifdef CONFIG_PPC_BOOK3S_64
+	/*
+	 * Under radix, we do not want to keep lazy PIDs around because
+	 * even if the CPU does not access userspace, it can still bring
+	 * in translations through speculation and prefetching.
+	 *
+	 * Switching away here allows us to trim back the mm_cpumask in
+	 * cases where we know the process is not running on some CPUs
+	 * (see mm/tlb-radix.c).
+	 */
+	if (radix_enabled() && mm != &init_mm) {
+		mmgrab(&init_mm);
+		tsk->active_mm = &init_mm;
+		switch_mm_irqs_off(mm, tsk->active_mm, tsk);
+		mmdrop(mm);
+	}
+#endif
+
 	/* 64-bit Book3E keeps track of current PGD in the PACA */
 #ifdef CONFIG_PPC_BOOK3E_64
 	get_paca()->pgd = NULL;
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 49cc581a31cd..db7e696e4faf 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -255,10 +255,18 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 		return;
 
 	preempt_disable();
-	if (!mm_is_thread_local(mm))
-		_tlbie_pid(pid, RIC_FLUSH_TLB);
-	else
+	if (!mm_is_thread_local(mm)) {
+		if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
+			_tlbie_pid(pid, RIC_FLUSH_ALL);
+			atomic_set(&mm->context.active_cpus, 1);
+			cpumask_clear(mm_cpumask(mm));
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		} else {
+			_tlbie_pid(pid, RIC_FLUSH_TLB);
+		}
+	} else {
 		_tlbiel_pid(pid, RIC_FLUSH_TLB);
+	}
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
@@ -272,10 +280,16 @@ void radix__flush_all_mm(struct mm_struct *mm)
 		return;
 
 	preempt_disable();
-	if (!mm_is_thread_local(mm))
+	if (!mm_is_thread_local(mm)) {
 		_tlbie_pid(pid, RIC_FLUSH_ALL);
-	else
+		if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
+			atomic_set(&mm->context.active_cpus, 1);
+			cpumask_clear(mm_cpumask(mm));
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		}
+	} else {
 		_tlbiel_pid(pid, RIC_FLUSH_ALL);
+	}
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_all_mm);
@@ -368,10 +382,18 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 
 	if (full) {
-		if (local)
+		if (local) {
 			_tlbiel_pid(pid, RIC_FLUSH_TLB);
-		else
-			_tlbie_pid(pid, RIC_FLUSH_TLB);
+		} else {
+			if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
+				_tlbie_pid(pid, RIC_FLUSH_ALL);
+				atomic_set(&mm->context.active_cpus, 1);
+				cpumask_clear(mm_cpumask(mm));
+				cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+			} else {
+				_tlbie_pid(pid, RIC_FLUSH_TLB);
+			}
+		}
 	} else {
 		bool hflush = false;
 		unsigned long hstart, hend;
@@ -481,10 +503,18 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
 	}
 
 	if (full) {
-		if (local)
+		if (local) {
 			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
-		else
-			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL: RIC_FLUSH_TLB);
+		} else {
+			if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
+				_tlbie_pid(pid, RIC_FLUSH_ALL);
+				atomic_set(&mm->context.active_cpus, 1);
+				cpumask_clear(mm_cpumask(mm));
+				cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+			} else {
+				_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
+			}
+		}
 	} else {
 		if (local)
 			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
-- 
2.15.0.rc2


* [RFC PATCH 7/7] powerpc/64s/radix: Only flush local TLB for spurious fault flushes
From: Nicholas Piggin @ 2017-10-31  7:18 UTC
  To: linuxppc-dev, Aneesh Kumar K . V, Nicholas Piggin

When access permissions are relaxed, or are found to have been relaxed
by another thread, we flush that address out of the TLB to avoid a
future fault or micro-fault due to a stale TLB entry.

Currently, for processes with TLB entries on other CPUs, this flush is
always done with a global tlbie. Although that could reduce faults on
remote CPUs, a broadcast operation seems wasteful for something the
remote CPU can handle in-core if it comes to it.
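
The reason local-only can suffice: the generic fault path already
re-flushes on whichever CPU actually trips over the stale entry.
Roughly, paraphrased from handle_pte_fault() in mm/memory.c of this
era (simplified, not the exact code):

	if (ptep_set_access_flags(vma, address, pte, entry, write)) {
		update_mmu_cache(vma, address, pte);
	} else if (write) {
		/* PTE unchanged: assume a spurious fault caused by a
		 * stale TLB entry, and flush only this CPU. */
		flush_tlb_fix_spurious_fault(vma, address);
	}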

XXX: This still needs some consideration with accelerators; not for
merge yet.
---
 .../powerpc/include/asm/book3s/64/tlbflush-radix.h |  5 +++
 arch/powerpc/include/asm/book3s/64/tlbflush.h      | 11 ++++++
 arch/powerpc/mm/pgtable-book3s64.c                 |  5 ++-
 arch/powerpc/mm/pgtable.c                          |  2 +-
 arch/powerpc/mm/tlb-radix.c                        | 40 +++++++++++++++++++---
 5 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index af06c6fe8a9f..f1851eb64026 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -16,6 +16,8 @@ extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long sta
 					 unsigned long end, int psize);
 extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
 				       unsigned long start, unsigned long end);
+extern void radix__local_flush_pmd_tlb_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
@@ -25,6 +27,9 @@ extern void radix__local_flush_all_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
 					      int psize);
+extern void radix__local_flush_tlb_range_psize(struct mm_struct *mm,
+				unsigned long start, unsigned long end,
+				int psize);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 70760d018bcd..accfb49247a4 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -98,6 +98,17 @@ static inline void flush_all_mm(struct mm_struct *mm)
 #define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
 #define flush_all_mm(mm)		local_flush_all_mm(mm)
 #endif /* CONFIG_SMP */
+
+#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	if (radix_enabled())
+		radix__local_flush_tlb_page(vma, address);
+	else
+		flush_tlb_page(vma, address);
+}
+
 /*
  * flush the page walk cache for the address
  */
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 3b65917785a5..e46f346388d6 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -40,7 +40,10 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	if (changed) {
 		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
 					pmd_pte(entry), address);
-		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		if (radix_enabled())
+			radix__local_flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		else
+			flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index a03ff3d99e0c..acd6ae8062ce 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -223,7 +223,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 		if (!is_vm_hugetlb_page(vma))
 			assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
-		flush_tlb_page(vma, address);
+		flush_tlb_fix_spurious_fault(vma, address);
 	}
 	return changed;
 }
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index db7e696e4faf..2aca596e3853 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -410,16 +410,18 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 		asm volatile("ptesync": : :"memory");
 		if (local) {
-			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
+			__tlbiel_va_range(start, end, pid, page_size,
+						mmu_virtual_psize);
 			if (hflush)
 				__tlbiel_va_range(hstart, hend, pid,
-						HPAGE_PMD_SIZE, MMU_PAGE_2M);
+					HPAGE_PMD_SIZE, MMU_PAGE_2M);
 			asm volatile("ptesync": : :"memory");
 		} else {
-			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
+			__tlbie_va_range(start, end, pid, page_size,
+						mmu_virtual_psize);
 			if (hflush)
 				__tlbie_va_range(hstart, hend, pid,
-						HPAGE_PMD_SIZE, MMU_PAGE_2M);
+					HPAGE_PMD_SIZE, MMU_PAGE_2M);
 			asm volatile("eieio; tlbsync; ptesync": : :"memory");
 		}
 	}
@@ -477,6 +479,30 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 	tlb->need_flush_all = 0;
 }
 
+void radix__local_flush_tlb_range_psize(struct mm_struct *mm,
+				unsigned long start, unsigned long end,
+				int psize)
+{
+	unsigned long pid;
+	unsigned int page_shift = mmu_psize_defs[psize].shift;
+	unsigned long page_size = 1UL << page_shift;
+	unsigned long nr_pages = (end - start) >> page_shift;
+	bool full;
+
+	pid = mm->context.id;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		return;
+
+	preempt_disable();
+	full = (end == TLB_FLUSH_ALL ||
+			nr_pages > tlb_local_single_page_flush_ceiling);
+	if (full)
+		_tlbiel_pid(pid, RIC_FLUSH_TLB);
+	else
+		_tlbiel_va_range(start, end, pid, page_size, psize, false);
+	preempt_enable();
+}
+
 static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
 				unsigned long start, unsigned long end,
 				int psize, bool also_pwc)
@@ -604,6 +630,12 @@ void radix__flush_tlb_lpid(unsigned long lpid)
 }
 EXPORT_SYMBOL(radix__flush_tlb_lpid);
 
+void radix__local_flush_pmd_tlb_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+	radix__local_flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
+}
+
 void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
 				unsigned long start, unsigned long end)
 {
-- 
2.15.0.rc2


* Re: [RFC PATCH 6/7] powerpc/64s/radix: reset mm_cpumask for single thread process when possible
From: Nicholas Piggin @ 2017-10-31 23:28 UTC
  To: linuxppc-dev, Aneesh Kumar K . V, Nicholas Piggin

On Tue, 31 Oct 2017 18:18:27 +1100
Nicholas Piggin <npiggin@gmail.com> wrote:

> diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
> index 49cc581a31cd..db7e696e4faf 100644
> --- a/arch/powerpc/mm/tlb-radix.c
> +++ b/arch/powerpc/mm/tlb-radix.c
> @@ -255,10 +255,18 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
>  		return;
>  
>  	preempt_disable();
> -	if (!mm_is_thread_local(mm))
> -		_tlbie_pid(pid, RIC_FLUSH_TLB);
> -	else
> +	if (!mm_is_thread_local(mm)) {
> +		if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
> +			_tlbie_pid(pid, RIC_FLUSH_ALL);
> +			atomic_set(&mm->context.active_cpus, 1);
> +			cpumask_clear(mm_cpumask(mm));
> +			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));

Ben and Michael pointed out that this could be racy: at least
mmget_not_zero() could in theory come in here, grab the mm, and
use_mm() it. This needs a bit more auditing throughout the tree first.

We could close the races by putting a lock around the mm_is_thread_local()
test and the cpumask/counter reset, taking the same lock in the mm
switch path as well. It would be nice to avoid that if the
use_mm()/mmget() APIs don't get in the way.
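
Concretely, the locking idea might look like the sketch below
(illustrative only: context.cpumask_lock is an invented field that
does not exist in the tree, and the switch_mm() side would have to
take the same lock before setting its bit in the mask):

	/* Hypothetical: serialize the reset against switch_mm(). */
	spin_lock(&mm->context.cpumask_lock);
	if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
		_tlbie_pid(pid, RIC_FLUSH_ALL);
		atomic_set(&mm->context.active_cpus, 1);
		cpumask_clear(mm_cpumask(mm));
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	}
	spin_unlock(&mm->context.cpumask_lock);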

Thanks,
Nick


* Re: [RFC PATCH 6/7] powerpc/64s/radix: reset mm_cpumask for single thread process when possible
From: Balbir Singh @ 2017-11-01  5:09 UTC
  To: Nicholas Piggin
  Cc: open list:LINUX FOR POWERPC (32-BIT AND 64-BIT),
	Aneesh Kumar K . V

On Tue, Oct 31, 2017 at 6:18 PM, Nicholas Piggin <npiggin@gmail.com> wrote:
> When a single-threaded process has a non-local mm_cpumask and requires
> a full PID tlbie invalidation, use that as an opportunity to reset the
> cpumask back to the current CPU we're running on.
>
> There is a lot of tuning we can do with this, and more sophisticated
> management of PIDs and stale translations across CPUs is possible, but
> this is a simple change that significantly helps single-threaded
> processes without changing behaviour too much.
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>  arch/powerpc/include/asm/mmu_context.h | 19 +++++++++++++
>  arch/powerpc/mm/tlb-radix.c            | 52 +++++++++++++++++++++++++++-------
>  2 files changed, 60 insertions(+), 11 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
> index 20eae6f76247..05516027fd82 100644
> --- a/arch/powerpc/include/asm/mmu_context.h
> +++ b/arch/powerpc/include/asm/mmu_context.h
> @@ -5,6 +5,7 @@
>  #include <linux/kernel.h>
>  #include <linux/mm.h>
>  #include <linux/sched.h>
> +#include <linux/sched/mm.h>
>  #include <linux/spinlock.h>
>  #include <asm/mmu.h>
>  #include <asm/cputable.h>
> @@ -153,6 +154,24 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
>  static inline void enter_lazy_tlb(struct mm_struct *mm,
>                                   struct task_struct *tsk)
>  {
> +#ifdef CONFIG_PPC_BOOK3S_64
> +       /*
> +        * Under radix, we do not want to keep lazy PIDs around because
> +        * even if the CPU does not access userspace, it can still bring
> +        * in translations through speculation and prefetching.
> +        *
> +        * Switching away here allows us to trim back the mm_cpumask in
> +        * cases where we know the process is not running on some CPUs
> +        * (see mm/tlb-radix.c).
> +        */
> +       if (radix_enabled() && mm != &init_mm) {
> +               mmgrab(&init_mm);
> +               tsk->active_mm = &init_mm;
> +               switch_mm_irqs_off(mm, tsk->active_mm, tsk);
> +               mmdrop(mm);
> +       }
> +#endif
> +

I thought we did not use enter_lazy_tlb() for anything. Even unuse_mm()
is not called from common paths; do we care?

>         /* 64-bit Book3E keeps track of current PGD in the PACA */
>  #ifdef CONFIG_PPC_BOOK3E_64
>         get_paca()->pgd = NULL;
> diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
> index 49cc581a31cd..db7e696e4faf 100644
> --- a/arch/powerpc/mm/tlb-radix.c
> +++ b/arch/powerpc/mm/tlb-radix.c
> @@ -255,10 +255,18 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
>                 return;
>
>         preempt_disable();
> -       if (!mm_is_thread_local(mm))
> -               _tlbie_pid(pid, RIC_FLUSH_TLB);
> -       else
> +       if (!mm_is_thread_local(mm)) {
> +               if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
> +                       _tlbie_pid(pid, RIC_FLUSH_ALL);
> +                       atomic_set(&mm->context.active_cpus, 1);
> +                       cpumask_clear(mm_cpumask(mm));

I wonder if we can delegate this back to switch_mm_irqs_off() to lazily
clear the previous cpumask and check for changes to the current mask.

Balbir


* Re: [RFC PATCH 6/7] powerpc/64s/radix: reset mm_cpumask for single thread process when possible
From: Nicholas Piggin @ 2017-11-01  5:16 UTC
  To: Balbir Singh
  Cc: open list:LINUX FOR POWERPC (32-BIT AND 64-BIT),
	Aneesh Kumar K . V

On Wed, 1 Nov 2017 16:09:39 +1100
Balbir Singh <bsingharora@gmail.com> wrote:

> On Tue, Oct 31, 2017 at 6:18 PM, Nicholas Piggin <npiggin@gmail.com> wrote:
> > When a single-threaded process has a non-local mm_cpumask and requires
> > a full PID tlbie invalidation, use that as an opportunity to reset the
> > cpumask back to the current CPU we're running on.
> >
> > There is a lot of tuning we can do with this, and more sophisticated
> > management of PIDs and stale translations across CPUs is possible, but
> > this is a simple change that significantly helps single-threaded
> > processes without changing behaviour too much.
> >
> > Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> > ---
> >  arch/powerpc/include/asm/mmu_context.h | 19 +++++++++++++
> >  arch/powerpc/mm/tlb-radix.c            | 52 +++++++++++++++++++++++++++-------
> >  2 files changed, 60 insertions(+), 11 deletions(-)
> >
> > diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
> > index 20eae6f76247..05516027fd82 100644
> > --- a/arch/powerpc/include/asm/mmu_context.h
> > +++ b/arch/powerpc/include/asm/mmu_context.h
> > @@ -5,6 +5,7 @@
> >  #include <linux/kernel.h>
> >  #include <linux/mm.h>
> >  #include <linux/sched.h>
> > +#include <linux/sched/mm.h>
> >  #include <linux/spinlock.h>
> >  #include <asm/mmu.h>
> >  #include <asm/cputable.h>
> > @@ -153,6 +154,24 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
> >  static inline void enter_lazy_tlb(struct mm_struct *mm,
> >                                   struct task_struct *tsk)
> >  {
> > +#ifdef CONFIG_PPC_BOOK3S_64
> > +       /*
> > +        * Under radix, we do not want to keep lazy PIDs around because
> > +        * even if the CPU does not access userspace, it can still bring
> > +        * in translations through speculation and prefetching.
> > +        *
> > +        * Switching away here allows us to trim back the mm_cpumask in
> > +        * cases where we know the process is not running on some CPUs
> > +        * (see mm/tlb-radix.c).
> > +        */
> > +       if (radix_enabled() && mm != &init_mm) {
> > +               mmgrab(&init_mm);
> > +               tsk->active_mm = &init_mm;
> > +               switch_mm_irqs_off(mm, tsk->active_mm, tsk);
> > +               mmdrop(mm);
> > +       }
> > +#endif
> > +  
> 
> I thought we did not use enter_lazy_tlb() for anything. Even unuse_mm()
> is not called from common paths; do we care?

Yes, the core code uses this when switching to a kernel thread.
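
For reference, the path I mean is context_switch(); roughly,
paraphrased from kernel/sched/core.c of this era (simplified):

	if (!mm) {			/* next is a kernel thread */
		next->active_mm = oldmm;
		mmgrab(oldmm);
		enter_lazy_tlb(oldmm, next);	/* the hook patched here */
	} else {
		switch_mm_irqs_off(oldmm, mm, next);
	}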

> 
> >         /* 64-bit Book3E keeps track of current PGD in the PACA */
> >  #ifdef CONFIG_PPC_BOOK3E_64
> >         get_paca()->pgd = NULL;
> > diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
> > index 49cc581a31cd..db7e696e4faf 100644
> > --- a/arch/powerpc/mm/tlb-radix.c
> > +++ b/arch/powerpc/mm/tlb-radix.c
> > @@ -255,10 +255,18 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
> >                 return;
> >
> >         preempt_disable();
> > -       if (!mm_is_thread_local(mm))
> > -               _tlbie_pid(pid, RIC_FLUSH_TLB);
> > -       else
> > +       if (!mm_is_thread_local(mm)) {
> > +               if (atomic_read(&mm->mm_users) == 1 && current->mm == mm) {
> > +                       _tlbie_pid(pid, RIC_FLUSH_ALL);
> > +                       atomic_set(&mm->context.active_cpus, 1);
> > +                       cpumask_clear(mm_cpumask(mm));  
> 
> I wonder if we can delegate this back to switch_mm_irqs_off() to lazily
> clear the previous cpumask and check for changes to the current mask.

Yeah, there are some interesting concerns here that we have to flesh
out. I think the first five patches in the series are reasonable; they
don't really change behaviour fundamentally, except to better match the
radix tlbie instructions to the primitives we implement.

This one needs more work for sure :P

Thanks,
Nick

