linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] powerpc/mm: Batch tlb flush when invalidating pte entries
@ 2016-11-14 15:58 Aneesh Kumar K.V
  2016-11-14 16:08 ` Aneesh Kumar K.V
  0 siblings, 1 reply; 2+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-14 15:58 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V

This will improve the task exit case by batching tlb invalidates.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/radix.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index aec6e8ee6e27..e8b4f39e9fab 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -147,10 +147,16 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		 * new value of pte
 		 */
 		new_pte = (old_pte | set) & ~clr;
-		psize = radix_get_mmu_psize(pg_sz);
-		radix__flush_tlb_page_psize(mm, addr, psize);
-
-		__radix_pte_update(ptep, 0, new_pte);
+		/*
+		 * If we are trying to clear the pte, we can skip
+		 * the below sequence and batch the tlb flush. The
+		 * tlb flush batching is done by mmu gather code
+		 */
+		if (new_pte) {
+			psize = radix_get_mmu_psize(pg_sz);
+			radix__flush_tlb_page_psize(mm, addr, psize);
+			__radix_pte_update(ptep, 0, new_pte);
+		}
 	} else
 		old_pte = __radix_pte_update(ptep, clr, set);
 	asm volatile("ptesync" : : : "memory");
-- 
2.10.2

^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] powerpc/mm: Batch tlb flush when invalidating pte entries
  2016-11-14 15:58 [PATCH] powerpc/mm: Batch tlb flush when invalidating pte entries Aneesh Kumar K.V
@ 2016-11-14 16:08 ` Aneesh Kumar K.V
  0 siblings, 0 replies; 2+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-14 16:08 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev

"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:

> This will improve the task exit case, by batching tlb invalidates.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/book3s/64/radix.h | 14 ++++++++++----
>  1 file changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
> index aec6e8ee6e27..e8b4f39e9fab 100644
> --- a/arch/powerpc/include/asm/book3s/64/radix.h
> +++ b/arch/powerpc/include/asm/book3s/64/radix.h
> @@ -147,10 +147,16 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
>  		 * new value of pte
>  		 */
>  		new_pte = (old_pte | set) & ~clr;
> -		psize = radix_get_mmu_psize(pg_sz);
> -		radix__flush_tlb_page_psize(mm, addr, psize);
> -
> -		__radix_pte_update(ptep, 0, new_pte);
> +		/*
> +		 * If we are trying to clear the pte, we can skip
> +		 * the below sequence and batch the tlb flush. The
> +		 * tlb flush batching is done by mmu gather code
> +		 */
> +		if (new_pte) {
> +			psize = radix_get_mmu_psize(pg_sz);
> +			radix__flush_tlb_page_psize(mm, addr, psize);
> +			__radix_pte_update(ptep, 0, new_pte);
> +		}
>  	} else
>  		old_pte = __radix_pte_update(ptep, clr, set);
>  	asm volatile("ptesync" : : : "memory");

We can also avoid the ptesync, I guess. BTW, for the transition from V=0 to
a valid pte, we are good without this patch because that is done via set_pte_at().

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index e8b4f39e9fab..83c77323a769 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -142,7 +142,6 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		unsigned long new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
-		asm volatile("ptesync" : : : "memory");
 		/*
 		 * new value of pte
 		 */
@@ -153,6 +152,7 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		 * tlb flush batching is done by mmu gather code
 		 */
 		if (new_pte) {
+			asm volatile("ptesync" : : : "memory");
 			psize = radix_get_mmu_psize(pg_sz);
 			radix__flush_tlb_page_psize(mm, addr, psize);
 			__radix_pte_update(ptep, 0, new_pte);

^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2016-11-14 16:09 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-11-14 15:58 [PATCH] powerpc/mm: Batch tlb flush when invalidating pte entries Aneesh Kumar K.V
2016-11-14 16:08 ` Aneesh Kumar K.V

This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).