* [PATCH 01/14] Nested Virtualization: tools
@ 2010-08-05 14:59 Christoph Egger
2010-08-05 16:21 ` Keir Fraser
From: Christoph Egger @ 2010-08-05 14:59 UTC
To: xen-devel
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
# HG changeset patch
# User cegger
# Date 1280925492 -7200
tools: Add nestedhvm guest config option
diff -r 3d0c15fe28db -r b13ace9a80d8 tools/libxc/xc_cpuid_x86.c
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -29,7 +29,7 @@
#define set_bit(idx, dst) ((dst) |= (1u << ((idx) & 31)))
#define DEF_MAX_BASE 0x0000000du
-#define DEF_MAX_EXT 0x80000008u
+#define DEF_MAX_EXT 0x8000000au
static int hypervisor_is_64bit(xc_interface *xch)
{
@@ -77,7 +77,7 @@ static void xc_cpuid_brand_get(char *str
static void amd_xc_cpuid_policy(
xc_interface *xch, domid_t domid,
const unsigned int *input, unsigned int *regs,
- int is_pae)
+ int is_pae, int is_nestedhvm)
{
switch ( input[0] )
{
@@ -96,6 +96,7 @@ static void amd_xc_cpuid_policy(
/* Filter all other features according to a whitelist. */
regs[2] &= ((is_64bit ? bitmaskof(X86_FEATURE_LAHF_LM) : 0) |
bitmaskof(X86_FEATURE_CMP_LEGACY) |
+ (is_nestedhvm ? bitmaskof(X86_FEATURE_SVME) : 0) |
bitmaskof(X86_FEATURE_ALTMOVCR) |
bitmaskof(X86_FEATURE_ABM) |
bitmaskof(X86_FEATURE_SSE4A) |
@@ -120,13 +121,43 @@ static void amd_xc_cpuid_policy(
*/
regs[2] = ((regs[2] & 0xf000u) + 1) | ((regs[2] & 0xffu) << 1) | 1u;
break;
+
+ case 0x8000000a: {
+ uint32_t edx;
+
+ if (!is_nestedhvm) {
+ regs[0] = regs[1] = regs[2] = regs[3] = 0;
+ break;
+ }
+
+#define SVM_FEATURE_NPT 0x00000001
+#define SVM_FEATURE_LBRV 0x00000002
+#define SVM_FEATURE_SVML 0x00000004
+#define SVM_FEATURE_NRIPS 0x00000008
+#define SVM_FEATURE_PAUSEFILTER 0x00000400
+
+ /* Only passthrough SVM features which are implemented */
+ edx = 0;
+ if (regs[3] & SVM_FEATURE_NPT)
+ edx |= SVM_FEATURE_NPT;
+ if (regs[3] & SVM_FEATURE_LBRV)
+ edx |= SVM_FEATURE_LBRV;
+ if (regs[3] & SVM_FEATURE_NRIPS)
+ edx |= SVM_FEATURE_NRIPS;
+ if (regs[3] & SVM_FEATURE_PAUSEFILTER)
+ edx |= SVM_FEATURE_PAUSEFILTER;
+
+ regs[3] = edx;
+ break;
+ }
+
}
}
static void intel_xc_cpuid_policy(
xc_interface *xch, domid_t domid,
const unsigned int *input, unsigned int *regs,
- int is_pae)
+ int is_pae, int is_nestedhvm)
{
switch ( input[0] )
{
@@ -160,6 +191,11 @@ static void intel_xc_cpuid_policy(
/* Mask AMD Number of Cores information. */
regs[2] = 0;
break;
+
+ case 0x8000000a:
+ /* Clear AMD SVM feature bits */
+ regs[0] = regs[1] = regs[2] = regs[3] = 0;
+ break;
}
}
@@ -168,12 +204,17 @@ static void xc_cpuid_hvm_policy(
const unsigned int *input, unsigned int *regs)
{
char brand[13];
+ unsigned long nestedhvm;
unsigned long pae;
int is_pae;
+ int is_nestedhvm;
xc_get_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, &pae);
is_pae = !!pae;
+ xc_get_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, &nestedhvm);
+ is_nestedhvm = !!nestedhvm;
+
switch ( input[0] )
{
case 0x00000000:
@@ -259,6 +300,7 @@ static void xc_cpuid_hvm_policy(
case 0x80000004: /* ... continued */
case 0x80000005: /* AMD L1 cache/TLB info (dumped by Intel policy) */
case 0x80000006: /* AMD L2/3 cache/TLB info ; Intel L2 cache features */
+ case 0x8000000a: /* AMD SVM feature bits */
break;
default:
@@ -268,9 +310,9 @@ static void xc_cpuid_hvm_policy(
xc_cpuid_brand_get(brand);
if ( strstr(brand, "AMD") )
- amd_xc_cpuid_policy(xch, domid, input, regs, is_pae);
+ amd_xc_cpuid_policy(xch, domid, input, regs, is_pae, is_nestedhvm);
else
- intel_xc_cpuid_policy(xch, domid, input, regs, is_pae);
+ intel_xc_cpuid_policy(xch, domid, input, regs, is_pae, is_nestedhvm);
}
diff -r 3d0c15fe28db -r b13ace9a80d8 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py
+++ b/tools/python/xen/xend/XendConfig.py
@@ -185,6 +185,7 @@ XENAPI_PLATFORM_CFG_TYPES = {
'vhpt': int,
'guest_os_type': str,
'hap': int,
+ 'nestedhvm' : int,
'xen_extended_power_mgmt': int,
'pci_msitranslate': int,
'pci_power_mgmt': int,
diff -r 3d0c15fe28db -r b13ace9a80d8 tools/python/xen/xend/XendConstants.py
--- a/tools/python/xen/xend/XendConstants.py
+++ b/tools/python/xen/xend/XendConstants.py
@@ -52,6 +52,7 @@ HVM_PARAM_TIMER_MODE = 10
HVM_PARAM_HPET_ENABLED = 11
HVM_PARAM_ACPI_S_STATE = 14
HVM_PARAM_VPT_ALIGN = 16
+HVM_PARAM_NESTEDHVM = 17 # x86
restart_modes = [
"restart",
diff -r 3d0c15fe28db -r b13ace9a80d8 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -2585,10 +2585,15 @@ class XendDomainInfo:
xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
long(timer_mode))
- # Set Viridian interface configuration of domain
- viridian = self.info["platform"].get("viridian")
- if arch.type == "x86" and hvm and viridian is not None:
- xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
+ if arch.type == "x86" and hvm:
+ # Set Viridian interface configuration of domain
+ viridian = self.info["platform"].get("viridian")
+ if viridian is not None:
+ xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
+ # Set nestedhvm of domain
+ nestedhvm = self.info["platform"].get("nestedhvm")
+ if nestedhvm is not None:
+ xc.hvm_set_param(self.domid, HVM_PARAM_NESTEDHVM, long(nestedhvm))
# If nomigrate is set, disable migration
nomigrate = self.info["platform"].get("nomigrate")
diff -r 3d0c15fe28db -r b13ace9a80d8 tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py
+++ b/tools/python/xen/xm/create.py
@@ -633,6 +633,11 @@ gopts.var('hap', val='HAP',
use="""Hap status (0=hap is disabled;
1=hap is enabled.""")
+gopts.var('nestedhvm', val='NESTEDHVM',
+ fn=set_int, default=0,
+ use="""Nested HVM status (0=Nested HVM is disabled;
+ 1=Nested HVM is enabled.""")
+
gopts.var('s3_integrity', val='TBOOT_MEMORY_PROTECT',
fn=set_int, default=1,
use="""Should domain memory integrity be verified during S3?
@@ -1083,7 +1088,7 @@ def configure_hvm(config_image, vals):
'isa',
'keymap',
'localtime',
- 'nographic',
+ 'nestedhvm', 'nographic',
'opengl', 'oos',
'pae', 'pci', 'pci_msitranslate', 'pci_power_mgmt',
'rtc_timeoffset',
diff -r 3d0c15fe28db -r b13ace9a80d8 xen/include/public/hvm/params.h
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -109,6 +109,9 @@
/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
#define HVM_PARAM_VPT_ALIGN 16
-#define HVM_NR_PARAMS 17
+/* Boolean: Enable nestedhvm */
+#define HVM_PARAM_NESTEDHVM 17
+
+#define HVM_NR_PARAMS 18
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
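(For illustration only: a minimal sketch of how a C toolstack could drive the new parameter through libxc. It assumes a setter counterpart xc_set_hvm_param() exists alongside the xc_get_hvm_param() used above, and that HVM_PARAM_NESTEDHVM comes from the patched params.h; xend does the equivalent from Python via xc.hvm_set_param() in the XendDomainInfo.py hunk.)

    /* Sketch, not part of the patch: enable nested HVM for a domain, then read
     * the parameter back the same way xc_cpuid_hvm_policy() does.  Assumes
     * xc_set_hvm_param() is available as the counterpart to xc_get_hvm_param(). */
    #include <stdio.h>
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    static int enable_nestedhvm(xc_interface *xch, domid_t domid)
    {
        unsigned long nestedhvm = 0;

        if ( xc_set_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, 1) )
            return -1;                                /* hypercall failed */

        if ( xc_get_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, &nestedhvm) )
            return -1;

        printf("nestedhvm for dom%u: %lu\n", (unsigned)domid, nestedhvm);
        return nestedhvm ? 0 : -1;
    }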
* Re: [PATCH 01/14] Nested Virtualization: tools
2010-08-05 14:59 [PATCH 01/14] Nested Virtualization: tools Christoph Egger
@ 2010-08-05 16:21 ` Keir Fraser
From: Keir Fraser @ 2010-08-05 16:21 UTC
To: Christoph Egger, xen-devel@lists.xensource.com; +Cc: Dong, Eddie
This, and any other patch that obviously sets up common non-vendor-specific
nesting logic, needs an ack from the Intel devs (e.g., Eddie Dong, whom I am
ccing here). I will require the same in the other direction when Intel repost
their patches. There is clearly a common element to both patch series that
needs to be pulled out and agreed on in common.
-- Keir
On 05/08/2010 15:59, "Christoph Egger" <Christoph.Egger@amd.com> wrote:
>
> Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
* Re: [PATCH 01/14] Nested Virtualization: tools
2010-08-17 7:19 Dong, Eddie
@ 2010-08-17 7:35 ` Keir Fraser
2010-08-17 7:54 ` Dong, Eddie
2010-08-17 11:01 ` Christoph Egger
From: Keir Fraser @ 2010-08-17 7:35 UTC
To: Dong, Eddie, Christoph Egger, xen-devel@lists.xensource.com
On 17/08/2010 08:19, "Dong, Eddie" <eddie.dong@intel.com> wrote:
>> xc_get_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, &pae);
>> is_pae = !!pae;
>>
>> + xc_get_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, &nestedhvm);
>
> If you insist on supporting cross-vendor nested virtualization, I would like to
> suggest we have multiple options for the configuration: VMX, SVM, or HW.
> The VMX and SVM options are for the situation where the user wants to enforce the
> guest VMX/SVM features regardless of the underlying hardware, while HW means the
> guest gets the same virtualization feature as the underlying hardware. In this way,
> it leaves room for either cross-vendor nested virtualization or native
> virtualization.
We don't want cross-vendor nested virt, ever. So a simple boolean is fine
imo.
-- Keir
* RE: [PATCH 01/14] Nested Virtualization: tools
2010-08-17 7:35 ` Keir Fraser
@ 2010-08-17 7:54 ` Dong, Eddie
2010-08-17 10:07 ` Christoph Egger
From: Dong, Eddie @ 2010-08-17 7:54 UTC
To: Keir Fraser, Christoph Egger, xen-devel@lists.xensource.com; +Cc: Dong, Eddie
Keir Fraser wrote:
> On 17/08/2010 08:19, "Dong, Eddie" <eddie.dong@intel.com> wrote:
>
>>> xc_get_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, &pae);
>>> is_pae = !!pae;
>>>
>>> + xc_get_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, &nestedhvm);
>>
>> If you insist to support cross vendor nested virtualization, I would
>> like to suggest we have multiple options for configuration: VMX,
>> SVM, or HW.
>> VMX and SVM option is for what situation that the user want to
>> enforce the guest VMX/SVM features regardless of underlying
>> hardware, while HW means to implements same with underlying
>> virtualization feature in guest. In this way, it provides room for
>> either cross vendor nested virtualization or natively virtualization.
>
> We don't want cross-vendor nested virt, ever. So a simple boolean is
> fine imo.
>
OK, got it. That is also what I believe.
In that case, I would suggest Chris reshuffle his patch series, which is primarily based on the assumption of supporting cross-vendor nested virtualization.
The major part of the so-called common code in this patch series is SVM-specific and should go into the hvm/svm sub-directory.
Thanks, Eddie
* Re: [PATCH 01/14] Nested Virtualization: tools
2010-08-17 7:54 ` Dong, Eddie
@ 2010-08-17 10:07 ` Christoph Egger
From: Christoph Egger @ 2010-08-17 10:07 UTC
To: Dong, Eddie; +Cc: xen-devel@lists.xensource.com, Keir Fraser
On Tuesday 17 August 2010 09:54:34 Dong, Eddie wrote:
> Keir Fraser wrote:
> > On 17/08/2010 08:19, "Dong, Eddie" <eddie.dong@intel.com> wrote:
> >>> xc_get_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, &pae);
> >>> is_pae = !!pae;
> >>>
> >>> + xc_get_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, &nestedhvm);
> >>
> >> If you insist to support cross vendor nested virtualization, I would
> >> like to suggest we have multiple options for configuration: VMX,
> >> SVM, or HW.
> >> VMX and SVM option is for what situation that the user want to
> >> enforce the guest VMX/SVM features regardless of underlying
> >> hardware, while HW means to implements same with underlying
> >> virtualization feature in guest. In this way, it provides room for
> >> either cross vendor nested virtualization or natively virtualization.
> >
> > We don't want cross-vendor nested virt, ever. So a simple boolean is
> > fine imo.
>
> OK, got it. That is also what I believe.
>
> In that case, I would suggest Chris reshuffle his patch series,
> which is primarily based on the assumption of supporting cross-vendor
> nested virtualization.
What makes you think that I make that assumption?
I can't remember ever having mentioned 'cross-vendor nested virtualization'.
> The major part of the so-called common code in this patch series is SVM-specific
> and should go into the hvm/svm sub-directory.
This should no longer be the case in the latest submission of the patch series.
I think there may still be some details that get in the way of implementing
nested virtualization for VMX.
Please go through the patch series and let me know which patches, files and
lines do not work for VMX, and why.
Thanks, Christoph
* Re: [PATCH 01/14] Nested Virtualization: tools
2010-08-17 7:19 Dong, Eddie
2010-08-17 7:35 ` Keir Fraser
@ 2010-08-17 11:01 ` Christoph Egger
2010-08-17 14:18 ` Dong, Eddie
From: Christoph Egger @ 2010-08-17 11:01 UTC
To: Dong, Eddie; +Cc: xen-devel@lists.xensource.com
On Tuesday 17 August 2010 09:19:20 Dong, Eddie wrote:
> > # HG changeset patch
> > # User cegger
> > # Date 1280925492 -7200
> > tools: Add nestedhvm guest config option
> >
> > diff -r 3d0c15fe28db -r b13ace9a80d8 tools/libxc/xc_cpuid_x86.c
> > --- a/tools/libxc/xc_cpuid_x86.c
> > +++ b/tools/libxc/xc_cpuid_x86.c
> > @@ -29,7 +29,7 @@
> > #define set_bit(idx, dst) ((dst) |= (1u << ((idx) & 31)))
> >
> > #define DEF_MAX_BASE 0x0000000du
> > -#define DEF_MAX_EXT 0x80000008u
> > +#define DEF_MAX_EXT 0x8000000au
>
> Real Intel hardware only has a maximum extended leaf of 0x80000008; I am not sure
> whether raising it to 0x8000000A will cause potential issues.
The leaf 0x8000000A is needed for SVM. Does this change cause any problems on
Intel CPUs?
> > static int hypervisor_is_64bit(xc_interface *xch)
> > {
> > @@ -77,7 +77,7 @@ static void xc_cpuid_brand_get(char *str
> > static void amd_xc_cpuid_policy(
> > xc_interface *xch, domid_t domid,
> > const unsigned int *input, unsigned int *regs,
> > - int is_pae)
> > + int is_pae, int is_nestedhvm)
> > {
> > switch ( input[0] )
> > {
> > @@ -96,6 +96,7 @@ static void amd_xc_cpuid_policy(
> > /* Filter all other features according to a whitelist. */
> > regs[2] &= ((is_64bit ? bitmaskof(X86_FEATURE_LAHF_LM) : 0) |
> > bitmaskof(X86_FEATURE_CMP_LEGACY) |
> > + (is_nestedhvm ? bitmaskof(X86_FEATURE_SVME) : 0)
> >
> > | bitmaskof(X86_FEATURE_ALTMOVCR) |
> >
> > bitmaskof(X86_FEATURE_ABM) |
> > bitmaskof(X86_FEATURE_SSE4A) |
> > @@ -120,13 +121,43 @@ static void amd_xc_cpuid_policy(
> > */
> > regs[2] = ((regs[2] & 0xf000u) + 1) | ((regs[2] & 0xffu) <<
> > 1) | 1u; break;
> > +
> > + case 0x8000000a: {
> > + uint32_t edx;
> > +
> > + if (!is_nestedhvm) {
> > + regs[0] = regs[1] = regs[2] = regs[3] = 0;
> > + break;
> > + }
> > +
> > +#define SVM_FEATURE_NPT 0x00000001
> > +#define SVM_FEATURE_LBRV 0x00000002
> > +#define SVM_FEATURE_SVML 0x00000004
> > +#define SVM_FEATURE_NRIPS 0x00000008
> > +#define SVM_FEATURE_PAUSEFILTER 0x00000400
>
> Should those macros go into a header file?
They are only used right below and exist for readability.
Do you see where else they could be used?
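(For what it's worth, the suggestion would amount to something like the following; the header name is purely hypothetical and the values are the ones from the patch.)

    /* Hypothetical header (name made up for illustration) holding the SVM
     * feature bits instead of the in-function #defines; values as in the patch. */
    #ifndef XC_CPUID_SVM_FEATURES_H
    #define XC_CPUID_SVM_FEATURES_H

    #define SVM_FEATURE_NPT          0x00000001
    #define SVM_FEATURE_LBRV         0x00000002
    #define SVM_FEATURE_SVML         0x00000004
    #define SVM_FEATURE_NRIPS        0x00000008
    #define SVM_FEATURE_PAUSEFILTER  0x00000400

    #endif /* XC_CPUID_SVM_FEATURES_H */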
>
> > +
> > + /* Only passthrough SVM features which are implemented */
> > + edx = 0;
> > + if (regs[3] & SVM_FEATURE_NPT)
> > + edx |= SVM_FEATURE_NPT;
> > + if (regs[3] & SVM_FEATURE_LBRV)
> > + edx |= SVM_FEATURE_LBRV;
> > + if (regs[3] & SVM_FEATURE_NRIPS)
> > + edx |= SVM_FEATURE_NRIPS;
> > + if (regs[3] & SVM_FEATURE_PAUSEFILTER)
> > + edx |= SVM_FEATURE_PAUSEFILTER;
> > +
> > + regs[3] = edx;
> > + break;
> > + }
> > +
> > }
> > }
> >
> > static void intel_xc_cpuid_policy(
> > xc_interface *xch, domid_t domid,
> > const unsigned int *input, unsigned int *regs,
> > - int is_pae)
> > + int is_pae, int is_nestedhvm)
> > {
> > switch ( input[0] )
> > {
> > @@ -160,6 +191,11 @@ static void intel_xc_cpuid_policy(
> > /* Mask AMD Number of Cores information. */
> > regs[2] = 0;
> > break;
> > +
> > + case 0x8000000a:
> > + /* Clear AMD SVM feature bits */
> > + regs[0] = regs[1] = regs[2] = regs[3] = 0;
> > + break;
>
> How do you expect an L1 guest running on top of a virtual Intel processor
> to detect an AMD feature (CPUID leaf 0x8000000a) when it knows it is
> running on top of an Intel processor? Leaf 0x8000000a does not exist on a native
> Intel processor, or do you expect to paravirtualize the L1 guest? Note:
> Intel processors advertise the VMX feature in CPUID.1:ECX.VMX[bit 5], and most of
> the remaining capabilities in MSRs.
Fine. I just want to make sure that the variables are initialized and don't
contain garbage.
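(As an aside on the two detection paths mentioned here: a rough, self-contained sketch, not taken from the patch, of how an L1 guest could probe for nested virtualization support using GCC's <cpuid.h> helpers. VMX is advertised in CPUID.1:ECX bit 5, SVM in CPUID.0x80000001:ECX bit 2, and the SVM feature flags live in leaf 0x8000000a EDX.)

    /* Sketch only: guest-side probing for VMX (Intel) or SVM (AMD). */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 if the leaf is above the advertised maximum. */
        if ( __get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 5)) )
            printf("VMX visible to this guest\n");

        if ( __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 2)) )
        {
            printf("SVM visible to this guest\n");
            if ( __get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx) )
                printf("SVM feature flags (EDX): %#x\n", edx);
        }
        return 0;
    }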
>
> Furthermore, if a future Intel processor defines CPUID leaf 0x8000000A, how can
> both of them be accommodated? This is one example of the difficulty of supporting
> SVM-on-VMX, although it is not a dead end. Or do you expect to modify the L1
> guest for this (a kind of PV solution)?
If I were implementing SVM-on-VMX then I wouldn't do that here.
> > }
> > }
> >
> > @@ -168,12 +204,17 @@ static void xc_cpuid_hvm_policy(
> > const unsigned int *input, unsigned int *regs)
> > {
> > char brand[13];
> > + unsigned long nestedhvm;
> > unsigned long pae;
> > int is_pae;
> > + int is_nestedhvm;
> >
> > xc_get_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, &pae);
> > is_pae = !!pae;
> >
> > + xc_get_hvm_param(xch, domid, HVM_PARAM_NESTEDHVM, &nestedhvm);
>
> If you insist on supporting cross-vendor nested virtualization, I would like
> to suggest we have multiple options for the configuration: VMX, SVM, or HW. The
> VMX and SVM options are for the situation where the user wants to enforce the
> guest VMX/SVM features regardless of the underlying hardware, while HW means the
> guest gets the same virtualization feature as the underlying hardware. In this
> way, it leaves room for either cross-vendor nested virtualization or
> native virtualization.
No, I don't insist on cross-vendor nested virtualization. I just
followed "pae" here. Do you see the analogy in the code lines?
Christoph
* RE: [PATCH 01/14] Nested Virtualization: tools
2010-08-17 11:01 ` Christoph Egger
@ 2010-08-17 14:18 ` Dong, Eddie
From: Dong, Eddie @ 2010-08-17 14:18 UTC
To: Christoph Egger; +Cc: xen-devel@lists.xensource.com, Dong, Eddie
Christoph Egger wrote:
> On Tuesday 17 August 2010 09:19:20 Dong, Eddie wrote:
>>> # HG changeset patch
>>> # User cegger
>>> # Date 1280925492 -7200
>>> tools: Add nestedhvm guest config option
>>>
>>> diff -r 3d0c15fe28db -r b13ace9a80d8 tools/libxc/xc_cpuid_x86.c
>>> --- a/tools/libxc/xc_cpuid_x86.c
>>> +++ b/tools/libxc/xc_cpuid_x86.c
>>> @@ -29,7 +29,7 @@
>>> #define set_bit(idx, dst) ((dst) |= (1u << ((idx) & 31)))
>>>
>>> #define DEF_MAX_BASE 0x0000000du
>>> -#define DEF_MAX_EXT 0x80000008u
>>> +#define DEF_MAX_EXT 0x8000000au
>>
>> An real Intel HW only have max leaf of 80000008, I am not sure if
>> renaming it to 8000000A will cause potential issues.
>
> The leaf 0x8000000A is needed for SVM. Does this change cause any
> problems on Intel CPUs?
>
Normally it won't. But consider an L1 guest that tries to read CPUID leaf 0x8000000A: on native hardware it
is an invalid leaf and thus returns the highest basic information leaf (0Bh here), while in the L1 guest it
is now valid and returns whatever values this patch sets.
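(To make the leaf-range point concrete, a small sketch, again not from the patch, of the probing order a well-behaved guest uses; this is what DEF_MAX_EXT bounds on the toolstack side: read the maximum extended leaf from CPUID 0x80000000 first, and only query 0x8000000a if it is covered.)

    /* Sketch only: honour the maximum extended leaf before reading 0x8000000a. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int max_ext, eax, ebx, ecx, edx;

        __cpuid(0x80000000, max_ext, ebx, ecx, edx);  /* EAX = maximum extended leaf */

        if ( max_ext >= 0x8000000a )
        {
            __cpuid(0x8000000a, eax, ebx, ecx, edx);
            printf("SVM revision %u, feature flags %#x\n", eax & 0xff, edx);
        }
        else
            printf("leaf 0x8000000a not advertised (max extended leaf %#x)\n", max_ext);

        return 0;
    }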
>>> +
>>> +#define SVM_FEATURE_NPT 0x00000001
>>> +#define SVM_FEATURE_LBRV 0x00000002
>>> +#define SVM_FEATURE_SVML 0x00000004
>>> +#define SVM_FEATURE_NRIPS 0x00000008
>>> +#define SVM_FEATURE_PAUSEFILTER 0x00000400
>>
>> Should those MACROs go to head file?
>
> They are only used right below and exist for readability.
> Do you see where else they could be used?
>
Not for now. But from a coding style point of view, in general I like code in code files and macros in header files.
But I am OK with it if nobody else comments.
>>
>> If you insist to support cross vendor nested virtualization, I would
>> like to suggest we have multiple options for configuration: VMX,
>> SVM, or HW. VMX and SVM option is for what situation that the user
>> want to enforce the guest VMX/SVM features regardless of underlying
>> hardware, while HW means to implements same with underlying
>> virtualization feature in guest. In this way, it provides room for
>> either cross vendor nested virtualization or natively virtualization.
>
> No, I don't insist on cross-vendor nested virtualization. I just
> followed "pae" here. Do you see the analogy in the code lines?
OK, then that is great. We should be able to reach consensus soon.
Thx, Eddie