public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH RFC] [x86] Optimize small size memcpy by avoiding long latency from decode stage
@ 2012-10-19 12:29 ling.ma.program
  0 siblings, 0 replies; only message in thread
From: ling.ma.program @ 2012-10-19 12:29 UTC (permalink / raw)
  To: mingo; +Cc: hpa, tglx, linux-kernel, iant, Ma Ling

From: Ma Ling <ling.ma.program@gmail.com>

CISC code has higher instruction density, saving memory and
improving the i-cache hit rate. However, decoding becomes a challenge:
only one multiple-uop (2~3) instruction can be decoded per cycle,
and instructions containing more than 4 uops (rep movsq/b) have to be handled by the MS-ROM.
That process takes a long time and, for small sizes, eats up the advantage gained from it.

In order to avoid this disadvantage, we make use of general instruction code
for small size copies. The result shows it can get a 1~2x improvement
on Core2, Nehalem, Sandy Bridge, Ivy Bridge, Atom, and Bulldozer as well.

Signed-off-by: Ma Ling <ling.ma.program@gmail.com>
---
 arch/x86/lib/memcpy_64.S |   14 +++++++++++++-
 1 files changed, 13 insertions(+), 1 deletions(-)

diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be..6a24c8c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -5,7 +5,6 @@
 #include <asm/cpufeature.h>
 #include <asm/dwarf2.h>
 #include <asm/alternative-asm.h>
-
 /*
  * memcpy - Copy a memory block.
  *
@@ -19,6 +18,15 @@
  */
 
 /*
+ * memcpy_c() and memcpy_c_e() use rep movsq/movsb respectively;
+ * these instructions have to get micro-ops from the Microcode Sequencer ROM.
+ * That decode process takes long latency, so in order to avoid it
+ * we choose a loop-unrolling routine for small sizes.
+ * Could vary the warm-up distance.
+ */
+
+
+/*
  * memcpy_c() - fast string ops (REP MOVSQ) based variant.
  *
  * This gets patched over the unrolled variant (below) via the
@@ -26,6 +34,8 @@
  */
 	.section .altinstr_replacement, "ax", @progbits
 .Lmemcpy_c:
+	cmpq $512, %rdx
+	jbe  memcpy 	
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	shrq $3, %rcx
@@ -46,6 +56,8 @@
  */
 	.section .altinstr_replacement, "ax", @progbits
 .Lmemcpy_c_e:
+	cmpq $512, %rdx
+	jbe  memcpy
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	rep movsb
-- 
1.6.5.2


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2012-10-19  5:37 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-10-19 12:29 [PATCH RFC] [x86] Optimize small size memcpy by avoiding long latency from decode stage ling.ma.program

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox