* [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature
@ 2012-05-25 1:19 Fenghua Yu
2012-05-25 1:50 ` David Miller
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Fenghua Yu @ 2012-05-25 1:19 UTC (permalink / raw)
To: Ingo Molnar, Thomas Gleixner, H Peter Anvin, Andi Kleen,
linux-kernel, x86
Cc: Fenghua Yu
From: Fenghua Yu <fenghua.yu@intel.com>
According to the Intel 64 and IA-32 SDM and Optimization Reference Manual, beginning
with Ivy Bridge, the REP string operations using MOVSB and STOSB can provide both
flexible and high-performance REP string operations in cases like memory copy.
Enhancement availability is indicated by CPUID.7.0.EBX[9] (Enhanced REP MOVSB/
STOSB).
If CPU erms feature is detected, patch copy_user_generic with enhanced fast
string version of copy_user_generic.
A few new macros are defined to reduce duplicate code in ALTERNATIVE and
ALTERNATIVE_2.
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
---
checkpatch.pl reports two errors in alternative_call_2() definition. I think
the errors are invalid and should be ignored:
ERROR: Macros with complex values should be enclosed in parenthesis
ERROR: space prohibited before open square bracket '['
arch/x86/include/asm/alternative.h | 74 ++++++++++++++++++++++++++++-------
arch/x86/include/asm/uaccess_64.h | 11 +++++-
arch/x86/kernel/x8664_ksyms_64.c | 1 +
3 files changed, 70 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 49331be..12ce4a2 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -75,23 +75,54 @@ static inline int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
+#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
+
+#define b_replacement(number) "663"#number
+#define e_replacement(number) "664"#number
+
+#define alt_slen "662b-661b"
+#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
+
+#define ALTINSTR_ENTRY(feature, number) \
+ " .long 661b - .\n" /* label */ \
+ " .long " b_replacement(number)"f - .\n" /* new instruction */ \
+ " .word " __stringify(feature) "\n" /* feature bit */ \
+ " .byte " alt_slen "\n" /* source len */ \
+ " .byte " alt_rlen(number) "\n" /* replacement len */
+
+#define DISCARD_ENTRY(number) /* rlen <= slen */ \
+ " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
+ b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
+
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
- \
- "661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .long 661b - .\n" /* label */ \
- " .long 663f - .\n" /* new instruction */ \
- " .word " __stringify(feature) "\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .discard,\"aw\",@progbits\n" \
- " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
- ".previous\n" \
- ".section .altinstr_replacement, \"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous"
+ OLDINSTR(oldinstr) \
+ ".section .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature, 1) \
+ ".previous\n" \
+ ".section .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ ".previous\n" \
+ ".section .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+ ".previous"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+ OLDINSTR(oldinstr) \
+ ".section .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
+ ".previous\n" \
+ ".section .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ DISCARD_ENTRY(2) \
+ ".previous\n" \
+ ".section .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+ ".previous"
/*
* This must be included *after* the definition of ALTERNATIVE due to
@@ -140,6 +171,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
+ output, input...) \
+ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+ "call %P[new2]", feature2) \
+ : output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+ [new2] "i" (newfunc2), ## input)
+
+/*
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index fcd4b6f..3d29c24 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
@@ -26,9 +28,16 @@ copy_user_generic(void *to, const void *from, unsigned len)
{
unsigned ret;
- alternative_call(copy_user_generic_unrolled,
+ /*
+ * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
+ * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
+ * Otherwise, use copy_user_generic_unrolled.
+ */
+ alternative_call_2(copy_user_generic_unrolled,
copy_user_generic_string,
X86_FEATURE_REP_GOOD,
+ copy_user_enhanced_fast_string,
+ X86_FEATURE_ERMS,
ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
"=d" (len)),
"1" (to), "2" (from), "3" (len)
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 9796c2f..6020f6f 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -28,6 +28,7 @@ EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
+EXPORT_SYMBOL(copy_user_enhanced_fast_string);
EXPORT_SYMBOL(__copy_user_nocache);
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);
--
1.6.0.3
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature
2012-05-25 1:19 [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature Fenghua Yu
@ 2012-05-25 1:50 ` David Miller
2012-05-25 2:47 ` Yu, Fenghua
2012-06-06 9:52 ` Ingo Molnar
2012-06-29 23:43 ` [tip:x86/asm] " tip-bot for Fenghua Yu
2 siblings, 1 reply; 6+ messages in thread
From: David Miller @ 2012-05-25 1:50 UTC (permalink / raw)
To: fenghua.yu; +Cc: mingo, tglx, hpa, andi, linux-kernel, x86
From: "Fenghua Yu" <fenghua.yu@intel.com>
Date: Thu, 24 May 2012 18:19:45 -0700
> According to Intel 64 and IA-32 SDM and Optimization Reference
> Manual, beginning with Ivybridge, REG string operation using MOVSB
> and STOSB can provide both flexible and high-performance REG string
> operations in cases like memory copy. Enhancement availability is
> indicated by CPUID.7.0.EBX[9] (Enhanced REP MOVSB/ STOSB).
How does the cpu do overlap detection?
If the cpu does overlap detection on sub-pagesize bits, performance
will unnecessarily suffer under such circumstances.
^ permalink raw reply [flat|nested] 6+ messages in thread
* RE: [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature
2012-05-25 1:50 ` David Miller
@ 2012-05-25 2:47 ` Yu, Fenghua
2012-05-25 3:19 ` David Miller
0 siblings, 1 reply; 6+ messages in thread
From: Yu, Fenghua @ 2012-05-25 2:47 UTC (permalink / raw)
To: David Miller
Cc: mingo@elte.hu, tglx@linutronix.de, hpa@zytor.com,
andi@firstfloor.org, linux-kernel@vger.kernel.org, x86@kernel.org
> From: David Miller [mailto:davem@davemloft.net]
> Sent: Thursday, May 24, 2012 6:50 PM
> From: "Fenghua Yu" <fenghua.yu@intel.com>
> Date: Thu, 24 May 2012 18:19:45 -0700
>
> > According to Intel 64 and IA-32 SDM and Optimization Reference
> > Manual, beginning with Ivybridge, REG string operation using MOVSB
> > and STOSB can provide both flexible and high-performance REG string
> > operations in cases like memory copy. Enhancement availability is
> > indicated by CPUID.7.0.EBX[9] (Enhanced REP MOVSB/ STOSB).
>
> How does the cpu do overlap detection?
>
> If the cpu does overlap detection on sub-pagesize bits, performance
> will unnecessarily suffer under such circumstances.
Are you talking about memory overlap between source and destination? There is no overlap between these two areas in copy_user case because one area is in user space and another one is in kernel space.
In overlap case, it's software that detects overlap and sets backward copy. I don't see backward rep movsb performance degradation from my measurement.
Thanks.
-Fenghua
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature
2012-05-25 2:47 ` Yu, Fenghua
@ 2012-05-25 3:19 ` David Miller
0 siblings, 0 replies; 6+ messages in thread
From: David Miller @ 2012-05-25 3:19 UTC (permalink / raw)
To: fenghua.yu; +Cc: mingo, tglx, hpa, andi, linux-kernel, x86
From: "Yu, Fenghua" <fenghua.yu@intel.com>
Date: Fri, 25 May 2012 02:47:22 +0000
> Are you talking about memory overlap between source and destination?
> There is no overlap between these two areas in copy_user case
> because one area is in user space and another one is in kernel
> space.
>
> In overlap case, it's software that detects overlap and sets
> backward copy. I don't see backward rep movsb performance
> degradation from my measurement.
We have been told repeatedly in the past that the string instructions,
for compatibility with the defined semantics of the instruction, only
check the lowest bits when determining source and destination overlap.
So even if bits 12 and higher in the virtual address are different, it
is the address bits below bit 12 that determine overlap. And if this
overlap check triggers, the slow path is taken inside of the cpu.
This means that the impossibility of virtual address overlap, which
you mention, is irrelevant. Because it is the non-virtual address
bits which the cpu uses for overlap detection.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature
2012-05-25 1:19 [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature Fenghua Yu
2012-05-25 1:50 ` David Miller
@ 2012-06-06 9:52 ` Ingo Molnar
2012-06-29 23:43 ` [tip:x86/asm] " tip-bot for Fenghua Yu
2 siblings, 0 replies; 6+ messages in thread
From: Ingo Molnar @ 2012-06-06 9:52 UTC (permalink / raw)
To: Fenghua Yu, H. Peter Anvin, Thomas Gleixner
Cc: Ingo Molnar, Thomas Gleixner, H Peter Anvin, Andi Kleen,
linux-kernel, x86
* Fenghua Yu <fenghua.yu@intel.com> wrote:
> From: Fenghua Yu <fenghua.yu@intel.com>
>
> According to Intel 64 and IA-32 SDM and Optimization Reference Manual, beginning
> with Ivybridge, REG string operation using MOVSB and STOSB can provide both
> flexible and high-performance REG string operations in cases like memory copy.
> Enhancement availability is indicated by CPUID.7.0.EBX[9] (Enhanced REP MOVSB/
> STOSB).
>
> If CPU erms feature is detected, patch copy_user_generic with enhanced fast
> string version of copy_user_generic.
>
> A few new macros are defined to reduce duplicate code in ALTERNATIVE and
> ALTERNATIVE_2.
>
> Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
> ---
> checkpatch.pl reports two errors in alternative_call_2() definition. I think
> the errors are invalid and should be ignored:
> ERROR: Macros with complex values should be enclosed in parenthesis
> ERROR: space prohibited before open square bracket '['
ok.
>
> arch/x86/include/asm/alternative.h | 74 ++++++++++++++++++++++++++++-------
> arch/x86/include/asm/uaccess_64.h | 11 +++++-
> arch/x86/kernel/x8664_ksyms_64.c | 1 +
> 3 files changed, 70 insertions(+), 16 deletions(-)
Looks good to me. Peter, Thomas, any objections?
Thanks,
Ingo
^ permalink raw reply [flat|nested] 6+ messages in thread
* [tip:x86/asm] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature
2012-05-25 1:19 [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature Fenghua Yu
2012-05-25 1:50 ` David Miller
2012-06-06 9:52 ` Ingo Molnar
@ 2012-06-29 23:43 ` tip-bot for Fenghua Yu
2 siblings, 0 replies; 6+ messages in thread
From: tip-bot for Fenghua Yu @ 2012-06-29 23:43 UTC (permalink / raw)
To: linux-tip-commits; +Cc: linux-kernel, hpa, mingo, fenghua.yu, tglx, hpa
Commit-ID: 954e482bde20b0e208fd4d34ef26e10afd194600
Gitweb: http://git.kernel.org/tip/954e482bde20b0e208fd4d34ef26e10afd194600
Author: Fenghua Yu <fenghua.yu@intel.com>
AuthorDate: Thu, 24 May 2012 18:19:45 -0700
Committer: H. Peter Anvin <hpa@linux.intel.com>
CommitDate: Fri, 29 Jun 2012 15:33:34 -0700
x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature
According to the Intel 64 and IA-32 SDM and Optimization Reference Manual, beginning
with Ivy Bridge, the REP string operations using MOVSB and STOSB can provide both
flexible and high-performance REP string operations in cases like memory copy.
Enhancement availability is indicated by CPUID.7.0.EBX[9] (Enhanced REP MOVSB/
STOSB).
If CPU erms feature is detected, patch copy_user_generic with enhanced fast
string version of copy_user_generic.
A few new macros are defined to reduce duplicate code in ALTERNATIVE and
ALTERNATIVE_2.
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: http://lkml.kernel.org/r/1337908785-14015-1-git-send-email-fenghua.yu@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
---
arch/x86/include/asm/alternative.h | 74 ++++++++++++++++++++++++++++-------
arch/x86/include/asm/uaccess_64.h | 11 +++++-
arch/x86/kernel/x8664_ksyms_64.c | 1 +
3 files changed, 70 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 49331be..7078068 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -75,23 +75,54 @@ static inline int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
+#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
+
+#define b_replacement(number) "663"#number
+#define e_replacement(number) "664"#number
+
+#define alt_slen "662b-661b"
+#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
+
+#define ALTINSTR_ENTRY(feature, number) \
+ " .long 661b - .\n" /* label */ \
+ " .long " b_replacement(number)"f - .\n" /* new instruction */ \
+ " .word " __stringify(feature) "\n" /* feature bit */ \
+ " .byte " alt_slen "\n" /* source len */ \
+ " .byte " alt_rlen(number) "\n" /* replacement len */
+
+#define DISCARD_ENTRY(number) /* rlen <= slen */ \
+ " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
+ b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
+
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
- \
- "661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .long 661b - .\n" /* label */ \
- " .long 663f - .\n" /* new instruction */ \
- " .word " __stringify(feature) "\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .discard,\"aw\",@progbits\n" \
- " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
- ".previous\n" \
- ".section .altinstr_replacement, \"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous"
+ OLDINSTR(oldinstr) \
+ ".section .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature, 1) \
+ ".previous\n" \
+ ".section .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ ".previous\n" \
+ ".section .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+ ".previous"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+ OLDINSTR(oldinstr) \
+ ".section .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
+ ".previous\n" \
+ ".section .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ DISCARD_ENTRY(2) \
+ ".previous\n" \
+ ".section .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+ ".previous"
/*
* This must be included *after* the definition of ALTERNATIVE due to
@@ -140,6 +171,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
+ output, input...) \
+ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+ "call %P[new2]", feature2) \
+ : output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+ [new2] "i" (newfunc2), ## input)
+
+/*
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 8e796fb..d8def8b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
@@ -26,9 +28,16 @@ copy_user_generic(void *to, const void *from, unsigned len)
{
unsigned ret;
- alternative_call(copy_user_generic_unrolled,
+ /*
+ * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
+ * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
+ * Otherwise, use copy_user_generic_unrolled.
+ */
+ alternative_call_2(copy_user_generic_unrolled,
copy_user_generic_string,
X86_FEATURE_REP_GOOD,
+ copy_user_enhanced_fast_string,
+ X86_FEATURE_ERMS,
ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
"=d" (len)),
"1" (to), "2" (from), "3" (len)
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 9796c2f..6020f6f 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -28,6 +28,7 @@ EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
+EXPORT_SYMBOL(copy_user_enhanced_fast_string);
EXPORT_SYMBOL(__copy_user_nocache);
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);
^ permalink raw reply related [flat|nested] 6+ messages in thread
end of thread, other threads:[~2012-06-29 23:44 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-05-25 1:19 [PATCH] x86/copy_user_generic: Optimize copy_user_generic with CPU erms feature Fenghua Yu
2012-05-25 1:50 ` David Miller
2012-05-25 2:47 ` Yu, Fenghua
2012-05-25 3:19 ` David Miller
2012-06-06 9:52 ` Ingo Molnar
2012-06-29 23:43 ` [tip:x86/asm] " tip-bot for Fenghua Yu
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox