From: "Jan Beulich" <JBeulich@novell.com>
To: xen-devel@lists.xensource.com
Cc: andre.przywara@amd.com
Subject: [PATCH] svm: support EFER.LMSLE for guests
Date: Tue, 04 May 2010 17:03:34 +0100
Message-ID: <4BE061760200007800001362@vpn.id2.novell.com>
Now that the LMSLE (Long Mode Segment Limit Enable) feature is officially
documented in AMD's APM Volume 2 (see
http://support.amd.com/us/Processor_TechDocs/24593.pdf), I think it makes
sense to also allow HVM guests to make use of it.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Andre Przywara <andre.przywara@amd.com>
--- 2010-05-04.orig/xen/arch/x86/hvm/hvm.c 2010-04-22 14:43:25.000000000 +0200
+++ 2010-05-04/xen/arch/x86/hvm/hvm.c 2010-05-04 13:26:07.000000000 +0200
@@ -590,6 +590,7 @@ static bool_t hvm_efer_valid(uint64_t va
((sizeof(long) != 8) && (value & EFER_LME)) ||
(!cpu_has_nx && (value & EFER_NX)) ||
(!cpu_has_syscall && (value & EFER_SCE)) ||
+ (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
(!cpu_has_ffxsr && (value & EFER_FFXSE)) ||
((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
}
@@ -641,7 +642,8 @@ static int hvm_load_cpu_ctxt(struct doma
}
if ( !hvm_efer_valid(
- ctxt.msr_efer, EFER_FFXSE | EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+ ctxt.msr_efer,
+ EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
{
gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
ctxt.msr_efer);
@@ -995,7 +997,8 @@ int hvm_set_efer(uint64_t value)
value &= ~EFER_LMA;
- if ( !hvm_efer_valid(value, EFER_FFXSE | EFER_LME | EFER_NX | EFER_SCE) )
+ if ( !hvm_efer_valid(value,
+ EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE) )
{
gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
"EFER: %"PRIx64"\n", value);
--- 2010-05-04.orig/xen/arch/x86/hvm/svm/svm.c 2010-04-12 11:28:20.000000000 +0200
+++ 2010-05-04/xen/arch/x86/hvm/svm/svm.c 2010-05-04 13:23:02.000000000 +0200
@@ -57,6 +57,11 @@
u32 svm_feature_flags;
+#ifdef __x86_64__
+/* Indicate whether guests may use EFER.LMSLE. */
+unsigned char cpu_has_lmsl = 0;
+#endif
+
#define set_segment_register(name, value) \
asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
@@ -848,6 +853,29 @@ static int svm_cpu_up(struct cpuinfo_x86
/* Initialize core's ASID handling. */
svm_asid_init(c);
+#ifdef __x86_64__
+ /*
+ * Check whether EFER.LMSLE can be written.
+ * Unfortunately there's no feature bit defined for this.
+ */
+ eax = read_efer();
+ edx = read_efer() >> 32;
+ if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+ rdmsr(MSR_EFER, eax, edx);
+ if ( eax & EFER_LMSLE )
+ {
+ if ( c == &boot_cpu_data )
+ cpu_has_lmsl = 1;
+ wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+ }
+ else
+ {
+ if ( cpu_has_lmsl )
+ printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
+ cpu_has_lmsl = 0;
+ }
+#endif
+
return 1;
}
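Note on the svm.c change: since AMD defines no CPUID feature flag for LMSLE,
the hunk above probes for it at CPU bring-up: try to set the bit with a
fault-safe EFER write, read EFER back, and if the bit stuck, clear it again
and remember that the host supports it (recording this only for the boot CPU,
and warning and dropping the flag if a secondary CPU later disagrees). The
standalone sketch below shows the same write-and-read-back probe;
read_msr()/write_msr()/write_msr_safe() are hypothetical stand-ins for Xen's
rdmsr/wrmsr/wrmsr_safe helpers (write_msr_safe() assumed to trap #GP and
return 0 on success), and only the MSR number and bit position come from the
manual:

#include <stdint.h>

#define MSR_EFER    0xc0000080U
#define EFER_LMSLE  (1ULL << 13)

/* Hypothetical MSR accessors; the real code uses Xen's rdmsr/wrmsr macros. */
extern uint64_t read_msr(uint32_t msr);
extern void write_msr(uint32_t msr, uint64_t val);
extern int write_msr_safe(uint32_t msr, uint64_t val);

/* Return 1 if this CPU accepts EFER.LMSLE, 0 otherwise; EFER is restored. */
static int probe_efer_lmsle(void)
{
    uint64_t efer = read_msr(MSR_EFER);

    /* Try to set the bit; a CPU without LMSLE support faults on the write. */
    if ( write_msr_safe(MSR_EFER, efer | EFER_LMSLE) != 0 )
        return 0;

    /* Read back and check whether the bit actually stuck. */
    efer = read_msr(MSR_EFER);
    if ( !(efer & EFER_LMSLE) )
        return 0;

    /* Clear the bit again so the host keeps running with its original EFER. */
    write_msr(MSR_EFER, efer & ~EFER_LMSLE);
    return 1;
}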
--- 2010-05-04.orig/xen/include/asm-x86/hvm/hvm.h 2010-04-12 11:28:20.000000000 +0200
+++ 2010-05-04/xen/include/asm-x86/hvm/hvm.h 2010-05-04 13:23:02.000000000 +0200
@@ -136,6 +136,12 @@ struct hvm_function_table {
extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;
+#ifdef __i386__
+# define cpu_has_lmsl 0
+#else
+extern unsigned char cpu_has_lmsl;
+#endif
+
int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);
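Note on the hvm.h change: cpu_has_lmsl becomes a literal 0 on 32-bit builds
and a probed variable on 64-bit ones, so the new test in hvm_efer_valid()
costs nothing where LMSLE can never be offered. A small illustrative sketch of
that pattern (the names and the BUILD_32BIT switch are made up for the
example, not Xen's):

#include <stdint.h>
#include <stdio.h>

/*
 * On a hypothetical 32-bit build the capability is a compile-time 0, so any
 * branch guarded by it is dead code the compiler can drop; a 64-bit build
 * uses a runtime variable filled in by the boot-time probe instead.
 */
#if defined(BUILD_32BIT)
# define has_lmsl 0
#else
static unsigned char has_lmsl;          /* set once by the CPU probe */
#endif

#define EFER_LMSLE (1ULL << 13)

static int lmsle_allowed(uint64_t efer)
{
    /* With the macro form this whole expression folds to a constant. */
    return !(!has_lmsl && (efer & EFER_LMSLE));
}

int main(void)
{
    printf("LMSLE=1 allowed: %d\n", lmsle_allowed(EFER_LMSLE));
    return 0;
}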