* [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX
2011-06-17 4:53 [PATCH 0/3] POWER7 optimised copy loops Anton Blanchard
@ 2011-06-17 4:53 ` Anton Blanchard
2011-06-17 5:16 ` Michael Neuling
` (2 more replies)
2011-06-17 4:54 ` [PATCH 2/3] powerpc: POWER7 optimised memcpy " Anton Blanchard
2011-06-17 4:54 ` [PATCH 3/3] powerpc: POWER7 optimised copy_to_user/copy_from_user " Anton Blanchard
2 siblings, 3 replies; 13+ messages in thread
From: Anton Blanchard @ 2011-06-17 4:53 UTC (permalink / raw)
To: benh, paulus, mikey; +Cc: linuxppc-dev
Implement a POWER7 optimised copy_page using VMX. We copy a cacheline
at a time using VMX loads and stores.
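For reference, a rough C rendering of what the assembly below does (a
sketch only, assuming 64K pages and the <altivec.h> vector types; the
real implementation is the assembly):

#include <altivec.h>

#define SKETCH_PAGE_SIZE 65536	/* assumed page size, for illustration */

void copypage_vmx_sketch(void *to, const void *from)
{
	vector unsigned char *d = to;
	const vector unsigned char *s = from;
	int i;

	/* One 128B cacheline per iteration: eight 16B VMX loads, then
	 * eight 16B VMX stores, like the lvx/stvx loop below. */
	for (i = 0; i < SKETCH_PAGE_SIZE / 128; i++) {
		d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
		d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
		d += 8;
		s += 8;
	}
}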
Signed-off-by: Anton Blanchard <anton@samba.org>
---
How do we want to handle per-machine optimised functions? I've created
yet another feature bit here, but feature bits might get out of control
at some point.
Index: linux-powerpc/arch/powerpc/include/asm/cputable.h
===================================================================
--- linux-powerpc.orig/arch/powerpc/include/asm/cputable.h 2011-06-06 08:07:35.128707749 +1000
+++ linux-powerpc/arch/powerpc/include/asm/cputable.h 2011-06-17 07:39:58.996165527 +1000
@@ -200,6 +200,7 @@ extern const char *powerpc_base_platform
#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_POWER7 LONG_ASM_CONST(0x2000000000000000)
#ifndef __ASSEMBLY__
@@ -423,7 +424,7 @@ extern const char *powerpc_base_platform
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_POWER7)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
Index: linux-powerpc/arch/powerpc/lib/copypage_power7.S
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-powerpc/arch/powerpc/lib/copypage_power7.S 2011-06-17 07:39:58.996165527 +1000
@@ -0,0 +1,70 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2011
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+
+#define STACKFRAMESIZE 112
+
+_GLOBAL(copypage_power7)
+ mflr r0
+ std r3,48(r1)
+ std r4,56(r1)
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+
+ bl .enable_kernel_altivec
+
+ ld r12,STACKFRAMESIZE+16(r1)
+ ld r4,STACKFRAMESIZE+56(r1)
+ li r0,(PAGE_SIZE/128)
+ li r6,16
+ ld r3,STACKFRAMESIZE+48(r1)
+ li r7,32
+ li r8,48
+ mtctr r0
+ li r9,64
+ li r10,80
+ mtlr r12
+ li r11,96
+ li r12,112
+ addi r1,r1,STACKFRAMESIZE
+
+ .align 5
+1: lvx vr7,r0,r4
+ lvx vr6,r4,r6
+ lvx vr5,r4,r7
+ lvx vr4,r4,r8
+ lvx vr3,r4,r9
+ lvx vr2,r4,r10
+ lvx vr1,r4,r11
+ lvx vr0,r4,r12
+ addi r4,r4,128
+ stvx vr7,r0,r3
+ stvx vr6,r3,r6
+ stvx vr5,r3,r7
+ stvx vr4,r3,r8
+ stvx vr3,r3,r9
+ stvx vr2,r3,r10
+ stvx vr1,r3,r11
+ stvx vr0,r3,r12
+ addi r3,r3,128
+ bdnz 1b
+
+ blr
Index: linux-powerpc/arch/powerpc/lib/Makefile
===================================================================
--- linux-powerpc.orig/arch/powerpc/lib/Makefile 2011-05-19 19:57:38.058570608 +1000
+++ linux-powerpc/arch/powerpc/lib/Makefile 2011-06-17 07:39:58.996165527 +1000
@@ -16,7 +16,8 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
- checksum_wrappers_64.o hweight_64.o
+ checksum_wrappers_64.o hweight_64.o \
+ copypage_power7.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
Index: linux-powerpc/arch/powerpc/lib/copypage_64.S
===================================================================
--- linux-powerpc.orig/arch/powerpc/lib/copypage_64.S 2011-06-06 08:07:35.000000000 +1000
+++ linux-powerpc/arch/powerpc/lib/copypage_64.S 2011-06-17 07:39:58.996165527 +1000
@@ -17,7 +17,11 @@ PPC64_CACHES:
.section ".text"
_GLOBAL(copy_page)
+BEGIN_FTR_SECTION
lis r5,PAGE_SIZE@h
+FTR_SECTION_ELSE
+ b .copypage_power7
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POWER7)
ori r5,r5,PAGE_SIZE@l
BEGIN_FTR_SECTION
ld r10,PPC64_CACHES@toc(r2)
* Re: [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX
2011-06-17 4:53 ` [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX Anton Blanchard
@ 2011-06-17 5:16 ` Michael Neuling
2011-06-17 5:26 ` Anton Blanchard
2011-06-17 5:43 ` Benjamin Herrenschmidt
2011-06-17 5:53 ` Benjamin Herrenschmidt
2 siblings, 1 reply; 13+ messages in thread
From: Michael Neuling @ 2011-06-17 5:16 UTC (permalink / raw)
To: Anton Blanchard; +Cc: paulus, linuxppc-dev
> Implement a POWER7 optimised copy_page using VMX. We copy a cacheline
> at a time using VMX loads and stores.
>
> Signed-off-by: Anton Blanchard <anton@samba.org>
> ---
>
> How do we want to handle per-machine optimised functions? I've created
> yet another feature bit here, but feature bits might get out of control
> at some point.
Yeah, I'm pretty against CPU_FTR_POWER7. Every loon is going to attach
anything POWER7 to it.
I'm keen to see it set up in __setup_cpu_power7. Either use a function
pointer, or use the patch_instruction infrastructure to avoid indirect
function calls on small copies.
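For instance, a minimal sketch of the function-pointer variant (the
names are invented for illustration):

/* Default implementation, overridden at CPU setup time. */
void copy_page_base(void *to, const void *from);
void copypage_power7(void *to, const void *from);

void (*copy_page)(void *to, const void *from) = copy_page_base;

static void setup_cpu_power7_sketch(void)
{
	copy_page = copypage_power7;	/* no feature bit needed */
}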
Mikey
* Re: [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX
2011-06-17 5:16 ` Michael Neuling
@ 2011-06-17 5:26 ` Anton Blanchard
0 siblings, 0 replies; 13+ messages in thread
From: Anton Blanchard @ 2011-06-17 5:26 UTC (permalink / raw)
To: Michael Neuling; +Cc: paulus, linuxppc-dev
Hi,
> Yeah, I'm pretty against CPU_FTR_POWER7. Every loon is going to
> attach anything POWER7 to it.
>
> I'm keen to see it set up in __setup_cpu_power7. Either use a function
> pointer, or use the patch_instruction infrastructure to avoid indirect
> function calls on small copies.
Instruction patching in __setup_cpu_power7 could work. We might want to
have a nop at the start of the base functions and a label at the start
of the next instruction so we can easily override the base function and
jump back to it if things are too hard (like I do in the
copy_tofrom_user patch).
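Something like this, perhaps (a sketch only; it assumes the existing
create_branch()/patch_instruction() code-patching helpers, and the
symbol names are invented):

extern unsigned int copy_page_nop_slot[];  /* leading nop of the base */
extern char copy_page_base_body[];	   /* label just after the nop */

static void override_copy_page(unsigned long power7_entry)
{
	/* Overwrite the nop with a branch to the POWER7 version; the
	 * POWER7 code can branch back to copy_page_base_body if the
	 * copy turns out to be too hard for the fast path. */
	patch_instruction(copy_page_nop_slot,
			  create_branch(copy_page_nop_slot,
					power7_entry, 0));
}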
Anton
* Re: [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX
2011-06-17 4:53 ` [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX Anton Blanchard
2011-06-17 5:16 ` Michael Neuling
@ 2011-06-17 5:43 ` Benjamin Herrenschmidt
2011-06-17 5:53 ` Benjamin Herrenschmidt
2 siblings, 0 replies; 13+ messages in thread
From: Benjamin Herrenschmidt @ 2011-06-17 5:43 UTC (permalink / raw)
To: Anton Blanchard; +Cc: linuxppc-dev, mikey, paulus
On Fri, 2011-06-17 at 14:53 +1000, Anton Blanchard wrote:
> plain text document attachment (power7_copypage)
> Implement a POWER7 optimised copy_page using VMX. We copy a cacheline
> at a time using VMX loads and stores.
>
> Signed-off-by: Anton Blanchard <anton@samba.org>
> ---
>
> How do we want to handle per-machine optimised functions? I've created
> yet another feature bit here, but feature bits might get out of control
> at some point.
I've been wondering about that for some time... The feature bit itself
isn't a big deal; for the in-kernel features it's easy to split that
into separate masks (CPU features, cache features, debug features,
whatever). But I don't much like the branch tricks, which won't scale
once we have 4 or 5 versions...
What I really want is a way to patch the call sites to branch to an
alternate function.
Michael and I looked at that a while back when pondering merging
book3e/s, but we never got to something satisfactory; maybe we didn't
look hard enough at what our toolchain is capable of...
Cheers,
Ben.
* Re: [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX
2011-06-17 4:53 ` [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX Anton Blanchard
2011-06-17 5:16 ` Michael Neuling
2011-06-17 5:43 ` Benjamin Herrenschmidt
@ 2011-06-17 5:53 ` Benjamin Herrenschmidt
2011-06-17 20:26 ` Segher Boessenkool
2 siblings, 1 reply; 13+ messages in thread
From: Benjamin Herrenschmidt @ 2011-06-17 5:53 UTC (permalink / raw)
To: Anton Blanchard; +Cc: linuxppc-dev, mikey, paulus
On Fri, 2011-06-17 at 14:53 +1000, Anton Blanchard wrote:
> +#include <asm/page.h>
> +#include <asm/ppc_asm.h>
> +
> +#define STACKFRAMESIZE 112
> +
> +_GLOBAL(copypage_power7)
> + mflr r0
> + std r3,48(r1)
> + std r4,56(r1)
> + std r0,16(r1)
> + stdu r1,-STACKFRAMESIZE(r1)
> +
> + bl .enable_kernel_altivec
Don't you need to disable preemption? Or even disable interrupts? Or do
we know copy_page will never be called at interrupt time?
Also, I wonder whether you wouldn't be better off just manually enabling
VMX in the MSR and saving a few VRs yourself (if no current thread state
is attached)? That would be re-entrant.
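In other words, something like this C sketch (whether it is needed here
is exactly the question above; the declaration is assumed rather than
pulled from the right header):

#include <linux/preempt.h>

void enable_kernel_altivec(void);	/* assumed declaration */

static void vmx_copy_region(void *to, const void *from, unsigned long n)
{
	preempt_disable();
	enable_kernel_altivec();
	/* ... VMX loads/stores; the VRs must be dead again before
	 * preemption is re-enabled ... */
	preempt_enable();
}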
> + ld r12,STACKFRAMESIZE+16(r1)
> + ld r4,STACKFRAMESIZE+56(r1)
> + li r0,(PAGE_SIZE/128)
> + li r6,16
> + ld r3,STACKFRAMESIZE+48(r1)
> + li r7,32
> + li r8,48
> + mtctr r0
> + li r9,64
> + li r10,80
> + mtlr r12
> + li r11,96
> + li r12,112
> + addi r1,r1,STACKFRAMESIZE
> +
> + .align 5
Do we know that the blank will be filled with something harmless?
> +1: lvx vr7,r0,r4
> + lvx vr6,r4,r6
> + lvx vr5,r4,r7
> + lvx vr4,r4,r8
> + lvx vr3,r4,r9
> + lvx vr2,r4,r10
> + lvx vr1,r4,r11
> + lvx vr0,r4,r12
> + addi r4,r4,128
> + stvx vr7,r0,r3
> + stvx vr6,r3,r6
> + stvx vr5,r3,r7
> + stvx vr4,r3,r8
> + stvx vr3,r3,r9
> + stvx vr2,r3,r10
> + stvx vr1,r3,r11
> + stvx vr0,r3,r12
> + addi r3,r3,128
> + bdnz 1b
What about lvxl? You aren't likely to re-use the source data soon,
right?
Hrm... re-reading the architecture, it looks like the "l" variant is
quirky: it should really only be used on the last load of a cache block.
But in your case it should be OK to put it on the last accesses, since
we know the alignment.
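With the AltiVec intrinsics that would look something like this (a
sketch; vec_ldl() maps to lvxl):

#include <altivec.h>

static void copy_line_ldl(unsigned char *dst, const unsigned char *src)
{
	vector unsigned char v0, v1, v2, v3, v4, v5, v6, v7;

	v7 = vec_ld(0, src);
	v6 = vec_ld(16, src);
	v5 = vec_ld(32, src);
	v4 = vec_ld(48, src);
	v3 = vec_ld(64, src);
	v2 = vec_ld(80, src);
	v1 = vec_ld(96, src);
	v0 = vec_ldl(112, src);	/* last access of the 128B line: lvxl */

	vec_st(v7, 0, dst);
	vec_st(v6, 16, dst);
	vec_st(v5, 32, dst);
	vec_st(v4, 48, dst);
	vec_st(v3, 64, dst);
	vec_st(v2, 80, dst);
	vec_st(v1, 96, dst);
	vec_st(v0, 112, dst);
}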
> + blr
> Index: linux-powerpc/arch/powerpc/lib/Makefile
> ===================================================================
> --- linux-powerpc.orig/arch/powerpc/lib/Makefile 2011-05-19 19:57:38.058570608 +1000
> +++ linux-powerpc/arch/powerpc/lib/Makefile 2011-06-17 07:39:58.996165527 +1000
> @@ -16,7 +16,8 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
>
> obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
> memcpy_64.o usercopy_64.o mem_64.o string.o \
> - checksum_wrappers_64.o hweight_64.o
> + checksum_wrappers_64.o hweight_64.o \
> + copypage_power7.o
> obj-$(CONFIG_XMON) += sstep.o ldstfp.o
> obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
> obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
> Index: linux-powerpc/arch/powerpc/lib/copypage_64.S
> ===================================================================
> --- linux-powerpc.orig/arch/powerpc/lib/copypage_64.S 2011-06-06 08:07:35.000000000 +1000
> +++ linux-powerpc/arch/powerpc/lib/copypage_64.S 2011-06-17 07:39:58.996165527 +1000
> @@ -17,7 +17,11 @@ PPC64_CACHES:
> .section ".text"
>
> _GLOBAL(copy_page)
> +BEGIN_FTR_SECTION
> lis r5,PAGE_SIZE@h
> +FTR_SECTION_ELSE
> + b .copypage_power7
> +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POWER7)
> ori r5,r5,PAGE_SIZE@l
> BEGIN_FTR_SECTION
> ld r10,PPC64_CACHES@toc(r2)
>
* Re: [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX
2011-06-17 5:53 ` Benjamin Herrenschmidt
@ 2011-06-17 20:26 ` Segher Boessenkool
0 siblings, 0 replies; 13+ messages in thread
From: Segher Boessenkool @ 2011-06-17 20:26 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: paulus, mikey, linuxppc-dev, Anton Blanchard
>> + addi r1,r1,STACKFRAMESIZE
>> +
>> + .align 5
>
> Do we know that the blank will be filled with something harmless ?
Yes. See ppc_handle_align() in gas/config/tc-ppc.c: it fills with nops
(ori 0,0,0), and a branch if there are more than four nops, and for
POWER6 and POWER7 it puts a group-terminating insn (ori 1,1,0 resp.
ori 2,2,0) at the end.
It has done this (well, nops at least) since 2001.
Segher
* [PATCH 2/3] powerpc: POWER7 optimised memcpy using VMX
2011-06-17 4:53 [PATCH 0/3] POWER7 optimised copy loops Anton Blanchard
2011-06-17 4:53 ` [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX Anton Blanchard
@ 2011-06-17 4:54 ` Anton Blanchard
2011-06-17 5:57 ` Benjamin Herrenschmidt
2011-06-17 7:12 ` Gabriel Paubert
2011-06-17 4:54 ` [PATCH 3/3] powerpc: POWER7 optimised copy_to_user/copy_from_user " Anton Blanchard
2 siblings, 2 replies; 13+ messages in thread
From: Anton Blanchard @ 2011-06-17 4:54 UTC (permalink / raw)
To: benh, paulus, mikey; +Cc: linuxppc-dev
Implement a POWER7 optimised memcpy using VMX. For large aligned
copies this new loop is over 10% faster and for large unaligned
copies it is over 200% faster.
On POWER7 unaligned stores rarely slow down - they only flush when
a store crosses a 4KB page boundary. Furthermore this flush is
handled completely in hardware and should be 20-30 cycles.
Unaligned loads on the other hand flush much more often - whenever
crossing a 128 byte cache line, or a 32 byte sector if either sector
is an L1 miss.
Considering this information we really want to get the loads aligned
and not worry about the alignment of the stores. Microbenchmarks
confirm that this approach is much faster than the current unaligned
copy loop that uses shifts and rotates to ensure both loads and
stores are aligned.
We also want to try and do the stores in cacheline aligned, cacheline
sized chunks. If the store queue is unable to merge an entire
cacheline of stores then the L2 cache will have to do a
read/modify/write. Even worse, we will serialise this with the stores
in the next iteration of the copy loop since both iterations hit
the same cacheline.
Based on this, the new loop does the following things:
1 - 127 bytes
Get the source 8 byte aligned and use 8 byte loads and stores. Pretty
boring and similar to how the current loop works.
128 - 4095 bytes
Get the source 8 byte aligned and use 8 byte loads and stores,
1 cacheline at a time. We aren't doing the stores in cacheline
aligned chunks so we will potentially serialise once per cacheline.
Even so it is much better than the loop we have today.
4096 bytes and up
If both source and destination have the same alignment get them both
16 byte aligned, then get the destination cacheline aligned. Do
cacheline sized loads and stores using VMX.
If source and destination do not have the same alignment, we get the
destination cacheline aligned, and use permute to do aligned loads.
In both cases the VMX loop should be optimal - we always do aligned
loads and stores and are always doing stores in cacheline aligned,
cacheline sized chunks.
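In outline, the dispatch over the size classes above looks like this (a
C sketch with invented helper names, not the actual code):

#include <stddef.h>
#include <stdint.h>

void *short_copy(void *, const void *, size_t);
void *scalar_copy(void *, const void *, size_t);
void *vmx_aligned_copy(void *, const void *, size_t);
void *vmx_permute_copy(void *, const void *, size_t);

void *memcpy_power7_sketch(void *to, const void *from, size_t n)
{
	if (n < 16)
		return short_copy(to, from, n);
	if (n <= 4096)
		return scalar_copy(to, from, n);   /* 8B loads/stores */
	if (((uintptr_t)to ^ (uintptr_t)from) & 0xf)
		return vmx_permute_copy(to, from, n);  /* vperm loads */
	return vmx_aligned_copy(to, from, n);	   /* straight lvx/stvx */
}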
The VMX breakpoint of 4096 bytes was chosen using this microbenchmark:
http://ozlabs.org/~anton/junkcode/copy_to_user.c
(Note that the breakpoint analysis was done with the copy_tofrom_user
version of the loop and using varying sizes and alignments to read().
It's much easier to create a benchmark using read() that can control
the size and alignment of a kernel copy loop and synchronise it with
userspace doing optional VMX instructions).
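The general shape of such a read() benchmark (a sketch, not the linked
copy_to_user.c):

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	size_t size, offset;
	char *buf;
	int fd, i;

	if (argc < 3)
		return 1;
	size = atoi(argv[1]);
	offset = atoi(argv[2]);	/* controls the destination alignment */

	if (posix_memalign((void **)&buf, 65536, size + offset))
		return 1;
	fd = open("/dev/zero", O_RDONLY);

	for (i = 0; i < 100000; i++)
		read(fd, buf + offset, size);	/* exercises copy_to_user */

	return 0;
}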
Since we are using VMX and there is a cost to saving and restoring
the user VMX state there are two broad cases we need to benchmark:
- Best case - userspace never uses VMX
- Worst case - userspace always uses VMX
In reality a userspace process will sit somewhere between these two
extremes. Since we need to test both aligned and unaligned copies we
end up with 4 combinations. The point at which the VMX loop begins to
win is:
0% VMX
aligned 2048 bytes
unaligned 2048 bytes
100% VMX
aligned 16384 bytes
unaligned 8192 bytes
Considering that this is a microbenchmark, that the data is hot in
cache, and that the VMX loop has better store queue merging properties,
we set the breakpoint to 4096 bytes, a little below the unaligned
breakpoints.
Some future optimisations we can look at:
- Looking at the perf data, a significant part of the cost when a task
is always using VMX is the extra exception we take to restore the
VMX state. As such we should do something similar to the x86
optimisation that restores FPU state for heavy users. ie:
/*
* If the task has used fpu the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
* chances of needing FPU soon are obviously high now
*/
preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
and
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
*/
- We could create a paca bit to mirror the VMX enabled MSR bit and check
that first, avoiding multiple calls to enable_kernel_altivec (see the
sketch below).
- We could have two VMX breakpoints, one for when we know the user VMX
state is loaded into the registers and one when it isn't. This could
be a second bit in the paca so we can calculate the break points quickly.
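The paca-bit check might look like this (a sketch; the field name is
made up and the enable_kernel_altivec() declaration is assumed):

#include <asm/paca.h>

void enable_kernel_altivec(void);	/* assumed declaration */

static inline void maybe_enable_altivec(void)
{
	/* vmx_kernel_enabled: hypothetical mirror of the MSR VEC bit */
	if (!get_paca()->vmx_kernel_enabled)
		enable_kernel_altivec();
}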
Signed-off-by: Anton Blanchard <anton@samba.org>
---
Index: linux-powerpc/arch/powerpc/lib/Makefile
===================================================================
--- linux-powerpc.orig/arch/powerpc/lib/Makefile 2011-06-17 08:38:25.786110167 +1000
+++ linux-powerpc/arch/powerpc/lib/Makefile 2011-06-17 14:05:30.023020417 +1000
@@ -17,7 +17,7 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
checksum_wrappers_64.o hweight_64.o \
- copypage_power7.o
+ copypage_power7.o memcpy_power7.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
Index: linux-powerpc/arch/powerpc/lib/memcpy_64.S
===================================================================
--- linux-powerpc.orig/arch/powerpc/lib/memcpy_64.S 2011-06-17 08:32:33.670110896 +1000
+++ linux-powerpc/arch/powerpc/lib/memcpy_64.S 2011-06-17 08:38:25.806110507 +1000
@@ -11,7 +11,11 @@
.align 7
_GLOBAL(memcpy)
+BEGIN_FTR_SECTION
std r3,48(r1) /* save destination pointer for return value */
+FTR_SECTION_ELSE
+ b memcpy_power7
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POWER7)
PPC_MTOCRF 0x01,r5
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
Index: linux-powerpc/arch/powerpc/lib/memcpy_power7.S
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-powerpc/arch/powerpc/lib/memcpy_power7.S 2011-06-17 08:38:25.806110507 +1000
@@ -0,0 +1,596 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2011
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+
+#define STACKFRAMESIZE 256
+#define STK_REG(i) (112 + ((i)-14)*8)
+
+_GLOBAL(memcpy_power7)
+ cmpldi r5,16
+ cmpldi cr1,r5,4096
+
+ std r3,48(r1)
+
+ blt .Lshort_copy
+ bgt cr1,.Lvmx_copy
+
+ /* Get the source 8B aligned */
+ neg r6,r4
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-3)
+
+ bf cr7*4+3,1f
+ lbz r0,0(r4)
+ addi r4,r4,1
+ stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+3: sub r5,r5,r6
+ cmpldi r5,128
+ blt 5f
+
+ stdu r1,-STACKFRAMESIZE(r1)
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+ std r17,STK_REG(r17)(r1)
+ std r18,STK_REG(r18)(r1)
+ std r19,STK_REG(r19)(r1)
+ std r20,STK_REG(r20)(r1)
+ std r21,STK_REG(r21)(r1)
+ std r22,STK_REG(r22)(r1)
+
+ srdi r6,r5,7
+ mtctr r6
+
+ /* Now do cacheline (128B) sized loads and stores. */
+ .align 5
+4: ld r0,0(r4)
+ ld r6,8(r4)
+ ld r7,16(r4)
+ ld r8,24(r4)
+ ld r9,32(r4)
+ ld r10,40(r4)
+ ld r11,48(r4)
+ ld r12,56(r4)
+ ld r14,64(r4)
+ ld r15,72(r4)
+ ld r16,80(r4)
+ ld r17,88(r4)
+ ld r18,96(r4)
+ ld r19,104(r4)
+ ld r20,112(r4)
+ ld r21,120(r4)
+ addi r4,r4,128
+ std r0,0(r3)
+ std r6,8(r3)
+ std r7,16(r3)
+ std r8,24(r3)
+ std r9,32(r3)
+ std r10,40(r3)
+ std r11,48(r3)
+ std r12,56(r3)
+ std r14,64(r3)
+ std r15,72(r3)
+ std r16,80(r3)
+ std r17,88(r3)
+ std r18,96(r3)
+ std r19,104(r3)
+ std r20,112(r3)
+ std r21,120(r3)
+ addi r3,r3,128
+ bdnz 4b
+
+ clrldi r5,r5,(64-7)
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+ ld r17,STK_REG(r17)(r1)
+ ld r18,STK_REG(r18)(r1)
+ ld r19,STK_REG(r19)(r1)
+ ld r20,STK_REG(r20)(r1)
+ ld r21,STK_REG(r21)(r1)
+ ld r22,STK_REG(r22)(r1)
+ addi r1,r1,STACKFRAMESIZE
+
+ /* Up to 127B to go */
+5: srdi r6,r5,4
+ mtocrf 0x01,r6
+
+6: bf cr7*4+1,7f
+ ld r0,0(r4)
+ ld r6,8(r4)
+ ld r7,16(r4)
+ ld r8,24(r4)
+ ld r9,32(r4)
+ ld r10,40(r4)
+ ld r11,48(r4)
+ ld r12,56(r4)
+ addi r4,r4,64
+ std r0,0(r3)
+ std r6,8(r3)
+ std r7,16(r3)
+ std r8,24(r3)
+ std r9,32(r3)
+ std r10,40(r3)
+ std r11,48(r3)
+ std r12,56(r3)
+ addi r3,r3,64
+
+ /* Up to 63B to go */
+7: bf cr7*4+2,8f
+ ld r0,0(r4)
+ ld r6,8(r4)
+ ld r7,16(r4)
+ ld r8,24(r4)
+ addi r4,r4,32
+ std r0,0(r3)
+ std r6,8(r3)
+ std r7,16(r3)
+ std r8,24(r3)
+ addi r3,r3,32
+
+ /* Up to 31B to go */
+8: bf cr7*4+3,9f
+ ld r0,0(r4)
+ ld r6,8(r4)
+ addi r4,r4,16
+ std r0,0(r3)
+ std r6,8(r3)
+ addi r3,r3,16
+
+9: clrldi r5,r5,(64-4)
+
+ /* Up to 15B to go */
+.Lshort_copy:
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+ lwz r0,0(r4) /* Less chance of a reject with word ops */
+ lwz r6,4(r4)
+ addi r4,r4,8
+ stw r0,0(r3)
+ stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+
+15: ld r3,48(r1)
+ blr
+
+.Lvmx_copy:
+ mflr r0
+ std r4,56(r1)
+ std r5,64(r1)
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+ bl .enable_kernel_altivec
+ ld r0,STACKFRAMESIZE+16(r1)
+ ld r3,STACKFRAMESIZE+48(r1)
+ ld r4,STACKFRAMESIZE+56(r1)
+ ld r5,STACKFRAMESIZE+64(r1)
+ mtlr r0
+
+ /*
+ * If source and destination are not relatively aligned we use a
+ * slower permute loop.
+ */
+ xor r6,r4,r3
+ rldicl. r6,r6,0,(64-4)
+ bne .Lvmx_unaligned_copy
+
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+ lbz r0,0(r4)
+ addi r4,r4,1
+ stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+ ld r0,0(r4)
+ addi r4,r4,8
+ std r0,0(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+ /* Get the destination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ bf cr7*4+3,5f
+ lvx vr1,r0,r4
+ addi r4,r4,16
+ stvx vr1,r0,r3
+ addi r3,r3,16
+
+5: bf cr7*4+2,6f
+ lvx vr1,r0,r4
+ lvx vr0,r4,r9
+ addi r4,r4,32
+ stvx vr1,r0,r3
+ stvx vr0,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+ lvx vr3,r0,r4
+ lvx vr2,r4,r9
+ lvx vr1,r4,r10
+ lvx vr0,r4,r11
+ addi r4,r4,64
+ stvx vr3,r0,r3
+ stvx vr2,r3,r9
+ stvx vr1,r3,r10
+ stvx vr0,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8: lvx vr7,r0,r4
+ lvx vr6,r4,r9
+ lvx vr5,r4,r10
+ lvx vr4,r4,r11
+ lvx vr3,r4,r12
+ lvx vr2,r4,r14
+ lvx vr1,r4,r15
+ lvx vr0,r4,r16
+ addi r4,r4,128
+ stvx vr7,r0,r3
+ stvx vr6,r3,r9
+ stvx vr5,r3,r10
+ stvx vr4,r3,r11
+ stvx vr3,r3,r12
+ stvx vr2,r3,r14
+ stvx vr1,r3,r15
+ stvx vr0,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+ lvx vr3,r0,r4
+ lvx vr2,r4,r9
+ lvx vr1,r4,r10
+ lvx vr0,r4,r11
+ addi r4,r4,64
+ stvx vr3,r0,r3
+ stvx vr2,r3,r9
+ stvx vr1,r3,r10
+ stvx vr0,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+ lvx vr1,r0,r4
+ lvx vr0,r4,r9
+ addi r4,r4,32
+ stvx vr1,r0,r3
+ stvx vr0,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+ lvx vr1,r0,r4
+ addi r4,r4,16
+ stvx vr1,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+ ld r0,0(r4)
+ addi r4,r4,8
+ std r0,0(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ ld r3,48(r1)
+ blr
+
+.Lvmx_unaligned_copy:
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+ lbz r0,0(r4)
+ addi r4,r4,1
+ stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+ lwz r0,0(r4) /* Less chance of a reject with word ops */
+ lwz r7,4(r4)
+ addi r4,r4,8
+ stw r0,0(r3)
+ stw r7,4(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+ /* Get the destination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ lvsl vr16,0,r4 /* Setup permute control vector */
+ lvx vr0,0,r4
+ addi r4,r4,16
+
+ bf cr7*4+3,5f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+ stvx vr8,r0,r3
+ addi r3,r3,16
+ vor vr0,vr1,vr1
+
+5: bf cr7*4+2,6f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+ lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+ lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+ lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+ lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ stvx vr10,r3,r10
+ stvx vr11,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8: lvx vr7,r0,r4
+ vperm vr8,vr0,vr7,vr16
+ lvx vr6,r4,r9
+ vperm vr9,vr7,vr6,vr16
+ lvx vr5,r4,r10
+ vperm vr10,vr6,vr5,vr16
+ lvx vr4,r4,r11
+ vperm vr11,vr5,vr4,vr16
+ lvx vr3,r4,r12
+ vperm vr12,vr4,vr3,vr16
+ lvx vr2,r4,r14
+ vperm vr13,vr3,vr2,vr16
+ lvx vr1,r4,r15
+ vperm vr14,vr2,vr1,vr16
+ lvx vr0,r4,r16
+ vperm vr15,vr1,vr0,vr16
+ addi r4,r4,128
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ stvx vr10,r3,r10
+ stvx vr11,r3,r11
+ stvx vr12,r3,r12
+ stvx vr13,r3,r14
+ stvx vr14,r3,r15
+ stvx vr15,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+ lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+ lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+ lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+ lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ stvx vr10,r3,r10
+ stvx vr11,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+ stvx vr8,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ addi r4,r4,-16 /* Unwind the +16 load offset */
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+ lwz r0,0(r4) /* Less chance of a reject with word ops */
+ lwz r6,4(r4)
+ addi r4,r4,8
+ stw r0,0(r3)
+ stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ ld r3,48(r1)
+ blr
* Re: [PATCH 2/3] powerpc: POWER7 optimised memcpy using VMX
2011-06-17 4:54 ` [PATCH 2/3] powerpc: POWER7 optimised memcpy " Anton Blanchard
@ 2011-06-17 5:57 ` Benjamin Herrenschmidt
2011-06-17 7:12 ` Gabriel Paubert
1 sibling, 0 replies; 13+ messages in thread
From: Benjamin Herrenschmidt @ 2011-06-17 5:57 UTC (permalink / raw)
To: Anton Blanchard; +Cc: linuxppc-dev, mikey, paulus
On Fri, 2011-06-17 at 14:54 +1000, Anton Blanchard wrote:
> +.Lvmx_copy:
> + mflr r0
> + std r4,56(r1)
> + std r5,64(r1)
> + std r0,16(r1)
> + stdu r1,-STACKFRAMESIZE(r1)
> + bl .enable_kernel_altivec
> + ld r0,STACKFRAMESIZE+16(r1)
> + ld r3,STACKFRAMESIZE+48(r1)
> + ld r4,STACKFRAMESIZE+56(r1)
> + ld r5,STACKFRAMESIZE+64(r1)
> + mtlr r0
Disable interrupts? We won't save the VMX state on interrupts, and
memcpy is definitely re-entrant.
Or only run the optimization when not at interrupt time....
Cheers,
Ben.
* Re: [PATCH 2/3] powerpc: POWER7 optimised memcpy using VMX
2011-06-17 4:54 ` [PATCH 2/3] powerpc: POWER7 optimised memcpy " Anton Blanchard
2011-06-17 5:57 ` Benjamin Herrenschmidt
@ 2011-06-17 7:12 ` Gabriel Paubert
2011-06-17 9:02 ` David Laight
1 sibling, 1 reply; 13+ messages in thread
From: Gabriel Paubert @ 2011-06-17 7:12 UTC (permalink / raw)
To: Anton Blanchard; +Cc: mikey, paulus, linuxppc-dev
On Fri, Jun 17, 2011 at 02:54:00PM +1000, Anton Blanchard wrote:
> Implement a POWER7 optimised memcpy using VMX. For large aligned
> copies this new loop is over 10% faster and for large unaligned
> copies it is over 200% faster.
>
> [...]
> + /* Up to 31B to go */
> +8: bf cr7*4+3,9f
> + ld r0,0(r4)
> + ld r6,8(r4)
> + addi r4,r4,16
> + std r0,0(r3)
> + std r6,8(r3)
> + addi r3,r3,16
> +
> +9: clrldi r5,r5,(64-4)
I fail to see the point of that instruction: after it you move r5 to
cr7 and only test the 4 LSBs, so clearing the higher-order bits looks
superfluous.
There are other places where I think you can save a few instructions,
but that one stands out as completely useless, unless I'm missing
something really subtle.
And no, I don't have a Power7. I wish I had one, or 3...
BTW: do you have any statistics on the size distribution of memcpy and
copy_tofrom_user calls?
My gut feeling is that the intermediate case is the most important and
the short case the least critical (drowned in the overhead noise), but
that's the kind of thing I've often been wrong about.
Do you really need to save and restore all 32 VMX registers (1/2 kB),
or would it be possible (in a later step) to only save and restore the
ones actually used (and no CSR either)?
Regards,
Gabriel
^ permalink raw reply [flat|nested] 13+ messages in thread
* RE: [PATCH 2/3] powerpc: POWER7 optimised memcpy using VMX
2011-06-17 7:12 ` Gabriel Paubert
@ 2011-06-17 9:02 ` David Laight
0 siblings, 0 replies; 13+ messages in thread
From: David Laight @ 2011-06-17 9:02 UTC (permalink / raw)
To: Gabriel Paubert, Anton Blanchard; +Cc: linuxppc-dev, mikey, paulus
> On Fri, Jun 17, 2011 at 02:54:00PM +1000, Anton Blanchard wrote:
> > Implement a POWER7 optimised memcpy using VMX. For large aligned
> > copies this new loop is over 10% faster and for large unaligned
> > copies it is over 200% faster.
...
> BTW: do you have any statistics on the size distribution of memcpy and
> copy_tofrom_user calls?
>
> My gut feeling is that the intermediate case is the most important and
> the short case the least critical (drowned in the overhead noise), but
> that's the kind of thing I've often been wrong about.
My thoughts are certainly that the code is too big, and that 'cold
cache' performance, and possibly the effect of increasing the size of
the working set (i.e. displacing other code), may be significant in
real life.
For memcpy() the 'short' case will happen surprisingly often; I suspect
the fixed costs for the short case may dominate some real workloads.
I'm not sure the speed of misaligned copies matters enough
to take the hit of the alignment test!
Of course, I don't actually remember doing any instrumentation
of this, but I have changed i386/amd64 memcpy (not linux/glibc)
to avoid the 'rep movsb' used for the trailing bytes (copy
the last 'word' first) - the setup cost for 'rep movsb' is
over 40 clocks on netburst P4!
(It is possible to get amd64 to copy data as fast as the
'rep movsd', but the setup times are longer. And very recent
Intel cpus contain hardware acceleration for aligned and
misaligned 'rep movsd' - so trying anything clever isn't good.)
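In C, that 'copy the last word first' trick looks something like this
(a sketch):

#include <string.h>

/* Instead of a byte loop (or rep movsb) for the tail, copy the final
 * word up front; it may overlap bytes the main aligned loop also
 * writes, which is harmless for memcpy's non-overlapping contract. */
static void *copy_tail_first(void *dst, const void *src, size_t n)
{
	if (n >= sizeof(long)) {
		long last;

		memcpy(&last, (const char *)src + n - sizeof(long),
		       sizeof(long));
		memcpy((char *)dst + n - sizeof(long), &last,
		       sizeof(long));
		/* ... then copy the bulk with aligned word copies,
		 * rounding n down to a multiple of sizeof(long) ... */
	}
	return dst;
}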
I do realise this doesn't directly apply to ppc :-)
David
* [PATCH 3/3] powerpc: POWER7 optimised copy_to_user/copy_from_user using VMX
2011-06-17 4:53 [PATCH 0/3] POWER7 optimised copy loops Anton Blanchard
2011-06-17 4:53 ` [PATCH 1/3] powerpc: POWER7 optimised copy_page using VMX Anton Blanchard
2011-06-17 4:54 ` [PATCH 2/3] powerpc: POWER7 optimised memcpy " Anton Blanchard
@ 2011-06-17 4:54 ` Anton Blanchard
2011-06-17 5:58 ` Benjamin Herrenschmidt
2 siblings, 1 reply; 13+ messages in thread
From: Anton Blanchard @ 2011-06-17 4:54 UTC (permalink / raw)
To: benh, paulus, mikey; +Cc: linuxppc-dev
Implement a POWER7 optimised copy_to_user/copy_from_user using VMX.
For large aligned copies this new loop is over 10% faster, and for
large unaligned copies it is over 200% faster.
If we take a fault we fall back to the old version; this keeps
things relatively simple and easy to verify.
(The detailed comments below are copied from the POWER7 optimised
memcpy patch for completeness).
On POWER7 unaligned stores rarely slow down - they only flush when
a store crosses a 4KB page boundary. Furthermore this flush is
handled completely in hardware and should be 20-30 cycles.
Unaligned loads on the other hand flush much more often - whenever
crossing a 128 byte cache line, or a 32 byte sector if either sector
is an L1 miss.
Considering this information we really want to get the loads aligned
and not worry about the alignment of the stores. Microbenchmarks
confirm that this approach is much faster than the current unaligned
copy loop that uses shifts and rotates to ensure both loads and
stores are aligned.
We also want to try and do the stores in cacheline aligned, cacheline
sized chunks. If the store queue is unable to merge an entire
cacheline of stores then the L2 cache will have to do a
read/modify/write. Even worse, we will serialise this with the stores
in the next iteration of the copy loop since both iterations hit
the same cacheline.
Based on this, the new loop does the following things:
1 - 127 bytes
Get the source 8 byte aligned and use 8 byte loads and stores. Pretty
boring and similar to how the current loop works.
128 - 4095 bytes
Get the source 8 byte aligned and use 8 byte loads and stores,
1 cacheline at a time. We aren't doing the stores in cacheline
aligned chunks so we will potentially serialise once per cacheline.
Even so it is much better than the loop we have today.
4096 bytes and up
If both source and destination have the same alignment get them both
16 byte aligned, then get the destination cacheline aligned. Do
cacheline sized loads and stores using VMX.
If source and destination do not have the same alignment, we get the
destination cacheline aligned, and use permute to do aligned loads.
In both cases the VMX loop should be optimal - we always do aligned
loads and stores and are always doing stores in cacheline aligned,
cacheline sized chunks.
The VMX breakpoint of 4096 bytes was chosen using this microbenchmark:
http://ozlabs.org/~anton/junkcode/copy_to_user.c
Since we are using VMX and there is a cost to saving and restoring
the user VMX state there are two broad cases we need to benchmark:
- Best case - userspace never uses VMX
- Worst case - userspace always uses VMX
In reality a userspace process will sit somewhere between these two
extremes. Since we need to test both aligned and unaligned copies we
end up with 4 combinations. The point at which the VMX loop begins to
win is:
0% VMX
aligned 2048 bytes
unaligned 2048 bytes
100% VMX
aligned 16384 bytes
unaligned 8192 bytes
Considering that this is a microbenchmark, that the data is hot in
cache, and that the VMX loop has better store queue merging properties,
we set the breakpoint to 4096 bytes, a little below the unaligned
breakpoints.
Some future optimisations we can look at:
- Looking at the perf data, a significant part of the cost when a task
is always using VMX is the extra exception we take to restore the
VMX state. As such we should do something similar to the x86
optimisation that restores FPU state for heavy users. ie:
/*
* If the task has used fpu the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
* chances of needing FPU soon are obviously high now
*/
preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
and
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
*/
- We could create a paca bit to mirror the VMX enabled MSR bit and check
that first, avoiding multiple calls to enable_kernel_altivec.
- We could have two VMX breakpoints, one for when we know the user VMX
state is loaded into the registers and one when it isn't. This could
be a second bit in the paca so we can calculate the break points quickly.
Signed-off-by: Anton Blanchard <anton@samba.org>
---
Index: linux-powerpc/arch/powerpc/lib/copyuser_64.S
===================================================================
--- linux-powerpc.orig/arch/powerpc/lib/copyuser_64.S 2011-06-17 14:05:30.013020235 +1000
+++ linux-powerpc/arch/powerpc/lib/copyuser_64.S 2011-06-17 14:27:43.026572962 +1000
@@ -11,6 +11,10 @@
.align 7
_GLOBAL(__copy_tofrom_user)
+BEGIN_FTR_SECTION
+ b __copy_tofrom_user_power7
+END_FTR_SECTION_IFSET(CPU_FTR_POWER7)
+_GLOBAL(__copy_tofrom_user_base)
/* first check for a whole page copy on a page boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
Index: linux-powerpc/arch/powerpc/lib/copyuser_power7.S
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-powerpc/arch/powerpc/lib/copyuser_power7.S 2011-06-17 14:41:47.901277096 +1000
@@ -0,0 +1,654 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2011
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+
+#define STACKFRAMESIZE 256
+#define STK_REG(i) (112 + ((i)-14)*8)
+
+ .macro err1
+100:
+ .section __ex_table,"a"
+ .align 3
+ .llong 100b,.Ldo_err1
+ .previous
+ .endm
+
+ .macro err2
+200:
+ .section __ex_table,"a"
+ .align 3
+ .llong 200b,.Ldo_err2
+ .previous
+ .endm
+
+ .macro err3
+300:
+ .section __ex_table,"a"
+ .align 3
+ .llong 300b,.Ldo_err3
+ .previous
+ .endm
+
+ .macro err4
+400:
+ .section __ex_table,"a"
+ .align 3
+ .llong 400b,.Ldo_err4
+ .previous
+ .endm
+
+
+.Ldo_err2:
+ ld r22,STK_REG(r22)(r1)
+ ld r21,STK_REG(r21)(r1)
+ ld r20,STK_REG(r20)(r1)
+ ld r19,STK_REG(r19)(r1)
+ ld r18,STK_REG(r18)(r1)
+ ld r17,STK_REG(r17)(r1)
+.Ldo_err4:
+ ld r16,STK_REG(r16)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r14,STK_REG(r14)(r1)
+.Ldo_err3:
+ addi r1,r1,STACKFRAMESIZE
+.Ldo_err1:
+ ld r3,48(r1)
+ ld r4,56(r1)
+ ld r5,64(r1)
+ b __copy_tofrom_user_base
+
+
+_GLOBAL(__copy_tofrom_user_power7)
+ cmpldi r5,16
+ cmpldi cr1,r5,4096
+
+ std r3,48(r1)
+ std r4,56(r1)
+ std r5,64(r1)
+
+ blt .Lshort_copy
+ bgt cr1,.Lvmx_copy
+
+ /* Get the source 8B aligned */
+ neg r6,r4
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-3)
+
+ bf cr7*4+3,1f
+err1; lbz r0,0(r4)
+ addi r4,r4,1
+err1; stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+err1; lhz r0,0(r4)
+ addi r4,r4,2
+err1; sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+err1; lwz r0,0(r4)
+ addi r4,r4,4
+err1; stw r0,0(r3)
+ addi r3,r3,4
+
+3: sub r5,r5,r6
+ cmpldi r5,128
+ blt 5f
+
+ stdu r1,-STACKFRAMESIZE(r1)
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+ std r17,STK_REG(r17)(r1)
+ std r18,STK_REG(r18)(r1)
+ std r19,STK_REG(r19)(r1)
+ std r20,STK_REG(r20)(r1)
+ std r21,STK_REG(r21)(r1)
+ std r22,STK_REG(r22)(r1)
+
+ srdi r6,r5,7
+ mtctr r6
+
+ /* Now do cacheline (128B) sized loads and stores. */
+ .align 5
+4:
+err2; ld r0,0(r4)
+err2; ld r6,8(r4)
+err2; ld r7,16(r4)
+err2; ld r8,24(r4)
+err2; ld r9,32(r4)
+err2; ld r10,40(r4)
+err2; ld r11,48(r4)
+err2; ld r12,56(r4)
+err2; ld r14,64(r4)
+err2; ld r15,72(r4)
+err2; ld r16,80(r4)
+err2; ld r17,88(r4)
+err2; ld r18,96(r4)
+err2; ld r19,104(r4)
+err2; ld r20,112(r4)
+err2; ld r21,120(r4)
+ addi r4,r4,128
+err2; std r0,0(r3)
+err2; std r6,8(r3)
+err2; std r7,16(r3)
+err2; std r8,24(r3)
+err2; std r9,32(r3)
+err2; std r10,40(r3)
+err2; std r11,48(r3)
+err2; std r12,56(r3)
+err2; std r14,64(r3)
+err2; std r15,72(r3)
+err2; std r16,80(r3)
+err2; std r17,88(r3)
+err2; std r18,96(r3)
+err2; std r19,104(r3)
+err2; std r20,112(r3)
+err2; std r21,120(r3)
+ addi r3,r3,128
+ bdnz 4b
+
+ clrldi r5,r5,(64-7)
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+ ld r17,STK_REG(r17)(r1)
+ ld r18,STK_REG(r18)(r1)
+ ld r19,STK_REG(r19)(r1)
+ ld r20,STK_REG(r20)(r1)
+ ld r21,STK_REG(r21)(r1)
+ ld r22,STK_REG(r22)(r1)
+ addi r1,r1,STACKFRAMESIZE
+
+ /* Up to 127B to go */
+5: srdi r6,r5,4
+ mtocrf 0x01,r6
+
+6: bf cr7*4+1,7f
+err1; ld r0,0(r4)
+err1; ld r6,8(r4)
+err1; ld r7,16(r4)
+err1; ld r8,24(r4)
+err1; ld r9,32(r4)
+err1; ld r10,40(r4)
+err1; ld r11,48(r4)
+err1; ld r12,56(r4)
+ addi r4,r4,64
+err1; std r0,0(r3)
+err1; std r6,8(r3)
+err1; std r7,16(r3)
+err1; std r8,24(r3)
+err1; std r9,32(r3)
+err1; std r10,40(r3)
+err1; std r11,48(r3)
+err1; std r12,56(r3)
+ addi r3,r3,64
+
+ /* Up to 63B to go */
+7: bf cr7*4+2,8f
+err1; ld r0,0(r4)
+err1; ld r6,8(r4)
+err1; ld r7,16(r4)
+err1; ld r8,24(r4)
+ addi r4,r4,32
+err1; std r0,0(r3)
+err1; std r6,8(r3)
+err1; std r7,16(r3)
+err1; std r8,24(r3)
+ addi r3,r3,32
+
+ /* Up to 31B to go */
+8: bf cr7*4+3,9f
+err1; ld r0,0(r4)
+err1; ld r6,8(r4)
+ addi r4,r4,16
+err1; std r0,0(r3)
+err1; std r6,8(r3)
+ addi r3,r3,16
+
+9: clrldi r5,r5,(64-4)
+
+ /* Up to 15B to go */
+.Lshort_copy:
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+err1; lwz r0,0(r4) /* Less chance of a reject with word ops */
+err1; lwz r6,4(r4)
+ addi r4,r4,8
+err1; stw r0,0(r3)
+err1; stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+err1; lwz r0,0(r4)
+ addi r4,r4,4
+err1; stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+err1; lhz r0,0(r4)
+ addi r4,r4,2
+err1; sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+err1; lbz r0,0(r4)
+err1; stb r0,0(r3)
+
+15: li r3,0
+ blr
+
+.Lvmx_copy:
+ mflr r0
+ std r4,56(r1)
+ std r5,64(r1)
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+ bl .enable_kernel_altivec
+ ld r0,STACKFRAMESIZE+16(r1)
+ ld r3,STACKFRAMESIZE+48(r1)
+ ld r4,STACKFRAMESIZE+56(r1)
+ ld r5,STACKFRAMESIZE+64(r1)
+ mtlr r0
+
+ /*
+ * If source and destination are not relatively aligned we use a
+ * slower permute loop.
+ */
+ xor r6,r4,r3
+ rldicl. r6,r6,0,(64-4)
+ bne .Lvmx_unaligned_copy
+
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+err3; lbz r0,0(r4)
+ addi r4,r4,1
+err3; stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+err3; ld r0,0(r4)
+ addi r4,r4,8
+err3; std r0,0(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+ /* Get the destination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ bf cr7*4+3,5f
+err3; lvx vr1,r0,r4
+ addi r4,r4,16
+err3; stvx vr1,r0,r3
+ addi r3,r3,16
+
+5: bf cr7*4+2,6f
+err3; lvx vr1,r0,r4
+err3; lvx vr0,r4,r9
+ addi r4,r4,32
+err3; stvx vr1,r0,r3
+err3; stvx vr0,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+err3; lvx vr3,r0,r4
+err3; lvx vr2,r4,r9
+err3; lvx vr1,r4,r10
+err3; lvx vr0,r4,r11
+ addi r4,r4,64
+err3; stvx vr3,r0,r3
+err3; stvx vr2,r3,r9
+err3; stvx vr1,r3,r10
+err3; stvx vr0,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8:
+err4; lvx vr7,r0,r4
+err4; lvx vr6,r4,r9
+err4; lvx vr5,r4,r10
+err4; lvx vr4,r4,r11
+err4; lvx vr3,r4,r12
+err4; lvx vr2,r4,r14
+err4; lvx vr1,r4,r15
+err4; lvx vr0,r4,r16
+ addi r4,r4,128
+err4; stvx vr7,r0,r3
+err4; stvx vr6,r3,r9
+err4; stvx vr5,r3,r10
+err4; stvx vr4,r3,r11
+err4; stvx vr3,r3,r12
+err4; stvx vr2,r3,r14
+err4; stvx vr1,r3,r15
+err4; stvx vr0,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+err3; lvx vr3,r0,r4
+err3; lvx vr2,r4,r9
+err3; lvx vr1,r4,r10
+err3; lvx vr0,r4,r11
+ addi r4,r4,64
+err3; stvx vr3,r0,r3
+err3; stvx vr2,r3,r9
+err3; stvx vr1,r3,r10
+err3; stvx vr0,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+err3; lvx vr1,r0,r4
+err3; lvx vr0,r4,r9
+ addi r4,r4,32
+err3; stvx vr1,r0,r3
+err3; stvx vr0,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+err3; lvx vr1,r0,r4
+ addi r4,r4,16
+err3; stvx vr1,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+err3; ld r0,0(r4)
+ addi r4,r4,8
+err3; std r0,0(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+err3; lbz r0,0(r4)
+err3; stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ li r3,0
+ blr
+
+.Lvmx_unaligned_copy:
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+err3; lbz r0,0(r4)
+ addi r4,r4,1
+err3; stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
+err3; lwz r7,4(r4)
+ addi r4,r4,8
+err3; stw r0,0(r3)
+err3; stw r7,4(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+ /* Get the destination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ lvsl vr16,0,r4 /* Set up the permute control vector */
+err3; lvx vr0,0,r4
+ addi r4,r4,16
+
+ bf cr7*4+3,5f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+err3; stvx vr8,r0,r3
+ addi r3,r3,16
+ vor vr0,vr1,vr1
+
+5: bf cr7*4+2,6f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+err3; lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+err3; lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+err3; lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+err3; lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+err3; lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+err3; stvx vr10,r3,r10
+err3; stvx vr11,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8:
+err4; lvx vr7,r0,r4
+ vperm vr8,vr0,vr7,vr16
+err4; lvx vr6,r4,r9
+ vperm vr9,vr7,vr6,vr16
+err4; lvx vr5,r4,r10
+ vperm vr10,vr6,vr5,vr16
+err4; lvx vr4,r4,r11
+ vperm vr11,vr5,vr4,vr16
+err4; lvx vr3,r4,r12
+ vperm vr12,vr4,vr3,vr16
+err4; lvx vr2,r4,r14
+ vperm vr13,vr3,vr2,vr16
+err4; lvx vr1,r4,r15
+ vperm vr14,vr2,vr1,vr16
+err4; lvx vr0,r4,r16
+ vperm vr15,vr1,vr0,vr16
+ addi r4,r4,128
+err4; stvx vr8,r0,r3
+err4; stvx vr9,r3,r9
+err4; stvx vr10,r3,r10
+err4; stvx vr11,r3,r11
+err4; stvx vr12,r3,r12
+err4; stvx vr13,r3,r14
+err4; stvx vr14,r3,r15
+err4; stvx vr15,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+err3; lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+err3; lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+err3; lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+err3; lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+err3; stvx vr10,r3,r10
+err3; stvx vr11,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+err3; lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+err3; stvx vr8,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ addi r4,r4,-16 /* Unwind the +16 load offset */
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
+err3; lwz r6,4(r4)
+ addi r4,r4,8
+err3; stw r0,0(r3)
+err3; stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+err3; lbz r0,0(r4)
+err3; stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ li r3,0
+ blr
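For reference, the unaligned path above is the standard lvsl/vperm
realignment idiom: every load is 16B aligned, and a permute merges each
adjacent pair of vectors. One step of it, written with Altivec
intrinsics (a sketch only, not part of the patch):

#include <altivec.h>

/* Fetch the unaligned 16B at src using only aligned loads. */
vector unsigned char realign_load(const unsigned char *src)
{
        vector unsigned char ctl = vec_lvsl(0, src);  /* permute control */
        vector unsigned char lo  = vec_ld(0, src);    /* aligned 16B at or below src */
        vector unsigned char hi  = vec_ld(16, src);   /* next aligned 16B */

        return vec_perm(lo, hi, ctl);  /* the 16B starting at src */
}

The assembly amortises this to one load per 16B by carrying each
iteration's last vector into the next vperm (note the vor vr0,vr1,vr1
above).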
Index: linux-powerpc/arch/powerpc/lib/Makefile
===================================================================
--- linux-powerpc.orig/arch/powerpc/lib/Makefile 2011-06-17 14:27:42.396562026 +1000
+++ linux-powerpc/arch/powerpc/lib/Makefile 2011-06-17 14:27:43.026572962 +1000
@@ -17,7 +17,7 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
checksum_wrappers_64.o hweight_64.o \
- copypage_power7.o memcpy_power7.o
+ copypage_power7.o memcpy_power7.o copyuser_power7.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
* Re: [PATCH 3/3] powerpc: POWER7 optimised copy_to_user/copy_from_user using VMX
2011-06-17 4:54 ` [PATCH 3/3] powerpc: POWER7 optimised copy_to_user/copy_from_user " Anton Blanchard
@ 2011-06-17 5:58 ` Benjamin Herrenschmidt
0 siblings, 0 replies; 13+ messages in thread
From: Benjamin Herrenschmidt @ 2011-06-17 5:58 UTC (permalink / raw)
To: Anton Blanchard; +Cc: linuxppc-dev, mikey, paulus
On Fri, 2011-06-17 at 14:54 +1000, Anton Blanchard wrote:
> plain text document attachment (power7_copy_tofrom_user)
> Implement a POWER7 optimised copy_to_user/copy_from_user using VMX.
> For large aligned copies this new loop is over 10% faster, and for
> large unaligned copies it is over 200% faster.
>
> If we take a fault we fall back to the old version; this keeps
> things relatively simple and easy to verify.
Same re-entrancy comment as for the other ones: preempt & interrupts...
Except here it is worse, since you may page fault and thus lose the VMX
state completely.
Cheers,
Ben.
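A sketch of one way to close that window (the wrapper names below are
hypothetical, not code from this thread): hold preemption off while the
kernel owns the VMX unit. A page fault taken inside the copy then
cannot be serviced in atomic context, so the exception-table fixup
lands in the error path, which can fall back to the fixed-point copy
and fault the pages in normally.

#include <linux/preempt.h>

extern void enable_kernel_altivec(void);

static inline void vmx_copy_begin(void)
{
        preempt_disable();
        enable_kernel_altivec();  /* kernel owns VMX until vmx_copy_end() */
}

static inline void vmx_copy_end(void)
{
        preempt_enable();
}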