From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from mga14.intel.com (mga14.intel.com [143.182.124.37])
	by ozlabs.org (Postfix) with ESMTP id 6E5322C00B6;
	Fri, 10 Aug 2012 01:03:26 +1000 (EST)
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: linux-mm@kvack.org
Subject: [PATCH v2 6/6] x86: switch the 64bit uncached page clear to SSE/AVX v2
Date: Thu, 9 Aug 2012 18:03:03 +0300
Message-Id: <1344524583-1096-7-git-send-email-kirill.shutemov@linux.intel.com>
In-Reply-To: <1344524583-1096-1-git-send-email-kirill.shutemov@linux.intel.com>
References: <1344524583-1096-1-git-send-email-kirill.shutemov@linux.intel.com>
Cc: linux-mips@linux-mips.org, linux-sh@vger.kernel.org, Jan Beulich,
	"H. Peter Anvin", sparclinux@vger.kernel.org, Andrea Arcangeli,
	Andi Kleen, Robert Richter, x86@kernel.org, Hugh Dickins,
	Ingo Molnar, Mel Gorman, Alex Shi, Thomas Gleixner,
	KAMEZAWA Hiroyuki, Tim Chen, linux-kernel@vger.kernel.org,
	Andy Lutomirski, Johannes Weiner, Andrew Morton,
	linuxppc-dev@lists.ozlabs.org, "Kirill A. Shutemov"
List-Id: Linux on PowerPC Developers Mail List

From: Andi Kleen

With multiple threads, vector stores are more efficient, so use them.
This causes the page clear to run non-preemptibly and adds some
overhead. However, on 32bit it was already non-preemptible (due to
kmap_atomic) and there is a preemption opportunity every 4K unit.

On a 128GB NPB (NAS Parallel Benchmarks) run on a Westmere this improves
the performance regression of enabling transparent huge pages by ~2%
(2.81% to 0.81%), which is near the runtime variability now. On a
system with AVX support more improvement is expected.

Signed-off-by: Andi Kleen
[kirill.shutemov@linux.intel.com: Properly save/restore arguments]
Signed-off-by: Kirill A. Shutemov
---
 arch/x86/lib/clear_page_nocache_64.S |   91 ++++++++++++++++++++++++++++-----
 1 files changed, 77 insertions(+), 14 deletions(-)

diff --git a/arch/x86/lib/clear_page_nocache_64.S b/arch/x86/lib/clear_page_nocache_64.S
index ee16d15..c092919 100644
--- a/arch/x86/lib/clear_page_nocache_64.S
+++ b/arch/x86/lib/clear_page_nocache_64.S
@@ -1,29 +1,92 @@
+/*
+ * Clear pages with cache bypass.
+ *
+ * Copyright (C) 2011, 2012 Intel Corporation
+ * Author: Andi Kleen
+ *
+ * This software may be redistributed and/or modified under the terms of
+ * the GNU General Public License ("GPL") version 2 only as published by the
+ * Free Software Foundation.
+ */
+
 #include <linux/linkage.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 #include <asm/dwarf2.h>
 
+#define SSE_UNROLL 128
+
 /*
  * Zero a page avoiding the caches
  * rdi	page
  */
 ENTRY(clear_page_nocache)
 	CFI_STARTPROC
-	xorl	%eax,%eax
-	movl	$4096/64,%ecx
+	push	%rdi
+	call	kernel_fpu_begin
+	pop	%rdi
+	sub	$16,%rsp
+	CFI_ADJUST_CFA_OFFSET 16
+	movdqu	%xmm0,(%rsp)
+	xorpd	%xmm0,%xmm0
+	movl	$4096/SSE_UNROLL,%ecx
 	.p2align 4
 .Lloop:
 	decl	%ecx
-#define PUT(x) movnti %rax,x*8(%rdi)
-	movnti	%rax,(%rdi)
-	PUT(1)
-	PUT(2)
-	PUT(3)
-	PUT(4)
-	PUT(5)
-	PUT(6)
-	PUT(7)
-	leaq	64(%rdi),%rdi
+	.set x,0
+	.rept SSE_UNROLL/16
+	movntdq	%xmm0,x(%rdi)
+	.set x,x+16
+	.endr
+	leaq	SSE_UNROLL(%rdi),%rdi
 	jnz	.Lloop
-	nop
-	ret
+	movdqu	(%rsp),%xmm0
+	addq	$16,%rsp
+	CFI_ADJUST_CFA_OFFSET -16
+	jmp	kernel_fpu_end
 	CFI_ENDPROC
 ENDPROC(clear_page_nocache)
+
+#ifdef CONFIG_AS_AVX
+
+	.section .altinstr_replacement,"ax"
+1:	.byte 0xeb			/* jmp */
+	.byte (clear_page_nocache_avx - clear_page_nocache) - (2f - 1b)
+	/* offset */
+2:
+	.previous
+	.section .altinstructions,"a"
+	altinstruction_entry clear_page_nocache,1b,X86_FEATURE_AVX,\
+			     16, 2b-1b
+	.previous
+
+#define AVX_UNROLL 256 /* TUNE ME */
+
+ENTRY(clear_page_nocache_avx)
+	CFI_STARTPROC
+	push	%rdi
+	call	kernel_fpu_begin
+	pop	%rdi
+	sub	$32,%rsp
+	CFI_ADJUST_CFA_OFFSET 32
+	vmovdqu	%ymm0,(%rsp)
+	vxorpd	%ymm0,%ymm0,%ymm0
+	movl	$4096/AVX_UNROLL,%ecx
+	.p2align 4
+.Lloop_avx:
+	decl	%ecx
+	.set x,0
+	.rept AVX_UNROLL/32
+	vmovntdq %ymm0,x(%rdi)
+	.set x,x+32
+	.endr
+	leaq	AVX_UNROLL(%rdi),%rdi
+	jnz	.Lloop_avx
+	vmovdqu	(%rsp),%ymm0
+	addq	$32,%rsp
+	CFI_ADJUST_CFA_OFFSET -32
+	jmp	kernel_fpu_end
+	CFI_ENDPROC
+ENDPROC(clear_page_nocache_avx)
+
+#endif
-- 
1.7.7.6
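
For reference, the loop the patch introduces is easier to follow in C. The
sketch below is a user-space illustration of the same technique (non-temporal
128-bit stores, unrolled to 128 bytes per iteration), not the kernel routine
itself; the function name, macros, and main() harness are invented for the
example, and inside the kernel any SSE/AVX use must be bracketed by
kernel_fpu_begin()/kernel_fpu_end() exactly as the assembly above does.

/*
 * Illustrative sketch only: clear one 4K page with cache-bypassing
 * (non-temporal) SSE2 stores, mirroring the movntdq loop in the patch.
 */
#include <emmintrin.h>		/* SSE2: _mm_setzero_si128, _mm_stream_si128 */
#include <stdlib.h>		/* posix_memalign, free */

#define PAGE_SIZE	4096
#define SSE_UNROLL	128	/* bytes cleared per outer-loop iteration */

static void clear_page_nocache_sketch(void *page)	/* must be 16-byte aligned */
{
	char *p = page;
	const __m128i zero = _mm_setzero_si128();
	unsigned int off, i;

	for (off = 0; off < PAGE_SIZE; off += SSE_UNROLL)
		for (i = 0; i < SSE_UNROLL; i += 16)	/* eight 16-byte stores */
			_mm_stream_si128((__m128i *)(p + off + i), zero);

	_mm_sfence();	/* order the streaming stores before the page is reused */
}

int main(void)
{
	void *page;

	if (posix_memalign(&page, PAGE_SIZE, PAGE_SIZE))
		return 1;
	clear_page_nocache_sketch(page);
	free(page);
	return 0;
}

The AVX path in the patch is the same idea with 32-byte vmovntdq stores; the
altinstructions entry patches a jmp over to clear_page_nocache_avx at boot
when X86_FEATURE_AVX is set, so no runtime branch is needed in the hot loop.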