linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2] KVM: SVM: Add support for AMD's OSVW feature in guests
@ 2012-01-04  4:38 Boris Ostrovsky
  2012-01-05 11:20 ` Marcelo Tosatti
  0 siblings, 1 reply; 4+ messages in thread
From: Boris Ostrovsky @ 2012-01-04  4:38 UTC (permalink / raw)
  To: avi, mtosatti, joerg.roedel; +Cc: kvm, linux-kernel, Boris Ostrovsky

From: Boris Ostrovsky <boris.ostrovsky@amd.com>

In some cases guests should not provide workarounds for errata even when the
physical processor is affected. For example, because of erratum 400 on family
10h processors a Linux guest will read an MSR (resulting in VMEXIT) before
going to idle in order to avoid getting stuck in a non-C0 state. This is not
necessary: HLT and IO instructions are intercepted and therefore there is no
reason for erratum 400 workaround in the guest.

This patch allows us to present a guest with certain errata as fixed,
regardless of the state of actual hardware.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@amd.com>
---
 arch/x86/include/asm/kvm_host.h |    6 ++++
 arch/x86/kvm/svm.c              |   55 +++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c              |   30 ++++++++++++++++++++-
 3 files changed, 90 insertions(+), 1 deletions(-)

Second version of OSVW patch. svm_init_osvw() is still in svm.c but registers'
values are kept in virtual MSRs and should be available on reboot after
cross-vendor migration. The registers can also be set from userland so
that they are consistent across a cluster.


diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4973f4..9ef9215 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -452,6 +452,12 @@ struct kvm_vcpu_arch {
 		u32 id;
 		bool send_user_only;
 	} apf;
+
+	/* OSVW MSRs (AMD only) */
+	struct {
+		u64 length;
+		u64 status;
+	} osvw;
 };
 
 struct kvm_arch {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e32243e..b19769d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -110,6 +110,13 @@ struct nested_state {
 #define MSRPM_OFFSETS	16
 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 
+/*
+ * Set osvw_len to higher value when updated Revision Guides
+ * are published and we know what the new status bits are
+ */
+static uint64_t osvw_len = 4, osvw_status;
+static DEFINE_SPINLOCK(svm_lock);
+
 struct vcpu_svm {
 	struct kvm_vcpu vcpu;
 	struct vmcb *vmcb;
@@ -556,6 +563,20 @@ static void svm_init_erratum_383(void)
 	erratum_383_found = true;
 }
 
+static void svm_init_osvw(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Guests should see errata 400 and 415 as fixed (assuming that
+	 * HLT and IO instructions are intercepted).
+	 */
+	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
+	vcpu->arch.osvw.status = osvw_status & ~(6ULL);
+
+	/* Mark erratum 298 as present */
+	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
+		vcpu->arch.osvw.status |= 1;
+}
+
 static int has_svm(void)
 {
 	const char *msg;
@@ -620,6 +641,38 @@ static int svm_hardware_enable(void *garbage)
 		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
 	}
 
+
+	/*
+	 * Get OSVW bits.
+	 *
+	 * Note that it is possible to have a system with mixed processor
+	 * revisions and therefore different OSVW bits. If bits are not the same
+	 * on different processors then choose the worst case (i.e. if erratum
+	 * is present on one processor and not on another then assume that the
+	 * erratum is present everywhere).
+	 */
+	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
+		uint64_t len, status;
+		int err;
+
+		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
+		if (!err)
+			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
+						      &err);
+
+		spin_lock(&svm_lock);
+		if (err)
+			osvw_status = osvw_len = 0;
+		else {
+			if (len < osvw_len)
+				osvw_len = len;
+			osvw_status |= status;
+			osvw_status &= (1ULL << osvw_len) - 1;
+		}
+		spin_unlock(&svm_lock);
+	} else
+		osvw_status = osvw_len = 0;
+
 	svm_init_erratum_383();
 
 	return 0;
@@ -1185,6 +1238,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
+	svm_init_osvw(&svm->vcpu);
+
 	return &svm->vcpu;
 
 free_page4:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4c938da..49a7b3c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -598,6 +598,14 @@ static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
 }
 
+static bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+	return best && (best->ecx & bit(X86_FEATURE_OSVW));
+}
+
 static void update_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -1700,6 +1708,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		 */
 		pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
 		break;
+	case MSR_AMD64_OSVW_ID_LENGTH:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		vcpu->arch.osvw.length = data;
+		break;
+	case MSR_AMD64_OSVW_STATUS:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		vcpu->arch.osvw.status = data;
+		break;
 	default:
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
@@ -1978,6 +1996,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		 */
 		data = 0xbe702111;
 		break;
+	case MSR_AMD64_OSVW_ID_LENGTH:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		data = vcpu->arch.osvw.length;
+		break;
+	case MSR_AMD64_OSVW_STATUS:
+		if (!guest_cpuid_has_osvw(vcpu))
+			return 1;
+		data = vcpu->arch.osvw.status;
+		break;
 	default:
 		if (!ignore_msrs) {
 			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -2451,7 +2479,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	const u32 kvm_supported_word6_x86_features =
 		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
 		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
 		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
 
 	/* cpuid 0xC0000001.edx */
-- 
1.7.3.4


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH v2] KVM: SVM: Add support for AMD's OSVW feature in guests
  2012-01-04  4:38 [PATCH v2] KVM: SVM: Add support for AMD's OSVW feature in guests Boris Ostrovsky
@ 2012-01-05 11:20 ` Marcelo Tosatti
  2012-01-05 18:37   ` Boris Ostrovsky
  0 siblings, 1 reply; 4+ messages in thread
From: Marcelo Tosatti @ 2012-01-05 11:20 UTC (permalink / raw)
  To: Boris Ostrovsky; +Cc: avi, joerg.roedel, kvm, linux-kernel, Boris Ostrovsky

On Tue, Jan 03, 2012 at 11:38:13PM -0500, Boris Ostrovsky wrote:
> From: Boris Ostrovsky <boris.ostrovsky@amd.com>
> 
> In some cases guests should not provide workarounds for errata even when the
> physical processor is affected. For example, because of erratum 400 on family
> 10h processors a Linux guest will read an MSR (resulting in VMEXIT) before
> going to idle in order to avoid getting stuck in a non-C0 state. This is not
> necessary: HLT and IO instructions are intercepted and therefore there is no
> reason for erratum 400 workaround in the guest.
> 
> This patch allows us to present a guest with certain errata as fixed,
> regardless of the state of actual hardware.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@amd.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    6 ++++
>  arch/x86/kvm/svm.c              |   55 +++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/x86.c              |   30 ++++++++++++++++++++-
>  3 files changed, 90 insertions(+), 1 deletions(-)
> 
> Second version of OSVW patch. svm_init_osvw() is still in svm.c but registers'
> values are kept in virtual MSRs and should be available on reboot after
> cross-vendor migration. The registers can also be set from userland so
> that they are consistent across a cluster.
> 
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b4973f4..9ef9215 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -452,6 +452,12 @@ struct kvm_vcpu_arch {
>  		u32 id;
>  		bool send_user_only;
>  	} apf;
> +
> +	/* OSVW MSRs (AMD only) */
> +	struct {
> +		u64 length;
> +		u64 status;
> +	} osvw;
>  };
>  
>  struct kvm_arch {
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index e32243e..b19769d 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -110,6 +110,13 @@ struct nested_state {
>  #define MSRPM_OFFSETS	16
>  static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
>  
> +/*
> + * Set osvw_len to higher value when updated Revision Guides
> + * are published and we know what the new status bits are
> + */
> +static uint64_t osvw_len = 4, osvw_status;
> +static DEFINE_SPINLOCK(svm_lock);

No need for this lock, operation already serialized by kvm_lock.

>  struct vcpu_svm {
>  	struct kvm_vcpu vcpu;
>  	struct vmcb *vmcb;
> @@ -556,6 +563,20 @@ static void svm_init_erratum_383(void)
>  	erratum_383_found = true;
>  }
>  
> +static void svm_init_osvw(struct kvm_vcpu *vcpu)
> +{
> +	/*
> +	 * Guests should see errata 400 and 415 as fixed (assuming that
> +	 * HLT and IO instructions are intercepted).
> +	 */
> +	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
> +	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

Do you really want to expose the hosts osvw_status and osvw_len? If
only exposure of 400 and 415 as fixed is necessary, then dealing with
migration is simpler (that is, you control what workarounds are applied
in the guest instead of making it dependent on particular hosts).

> +	/* Mark erratum 298 as present */
> +	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
> +		vcpu->arch.osvw.status |= 1;
> +}

Why is it necessary to explicitly do this? Please add documentation.

> +	case MSR_AMD64_OSVW_ID_LENGTH:
> +		if (!guest_cpuid_has_osvw(vcpu))
> +			return 1;
> +		vcpu->arch.osvw.length = data;
> +		break;
> +	case MSR_AMD64_OSVW_STATUS:
> +		if (!guest_cpuid_has_osvw(vcpu))
> +			return 1;
> +		vcpu->arch.osvw.status = data;
> +		break;

If writes are allowed, it is necessary to migrate this.

>  	default:
>  		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
>  			return xen_hvm_config(vcpu, data);
> @@ -1978,6 +1996,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>  		 */
>  		data = 0xbe702111;
>  		break;
> +	case MSR_AMD64_OSVW_ID_LENGTH:
> +		if (!guest_cpuid_has_osvw(vcpu))
> +			return 1;
> +		data = vcpu->arch.osvw.length;
> +		break;
> +	case MSR_AMD64_OSVW_STATUS:
> +		if (!guest_cpuid_has_osvw(vcpu))
> +			return 1;
> +		data = vcpu->arch.osvw.status;
> +		break;
>  	default:
>  		if (!ignore_msrs) {
>  			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
> @@ -2451,7 +2479,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
>  	const u32 kvm_supported_word6_x86_features =
>  		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
>  		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
> -		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
> +		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
>  		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
>  
>  	/* cpuid 0xC0000001.edx */
> -- 
> 1.7.3.4
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2] KVM: SVM: Add support for AMD's OSVW feature in guests
  2012-01-05 11:20 ` Marcelo Tosatti
@ 2012-01-05 18:37   ` Boris Ostrovsky
  2012-01-06 10:49     ` Marcelo Tosatti
  0 siblings, 1 reply; 4+ messages in thread
From: Boris Ostrovsky @ 2012-01-05 18:37 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Boris Ostrovsky, avi, joerg.roedel, kvm, linux-kernel

On 01/05/12 06:20, Marcelo Tosatti wrote:
> On Tue, Jan 03, 2012 at 11:38:13PM -0500, Boris Ostrovsky wrote:
>>
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index e32243e..b19769d 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -110,6 +110,13 @@ struct nested_state {
>>   #define MSRPM_OFFSETS	16
>>   static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
>>
>> +/*
>> + * Set osvw_len to higher value when updated Revision Guides
>> + * are published and we know what the new status bits are
>> + */
>> +static uint64_t osvw_len = 4, osvw_status;
>> +static DEFINE_SPINLOCK(svm_lock);
>
> No need for this lock, operation already serialized by kvm_lock.

Will remove the lock.

>
>>   struct vcpu_svm {
>>   	struct kvm_vcpu vcpu;
>>   	struct vmcb *vmcb;
>> @@ -556,6 +563,20 @@ static void svm_init_erratum_383(void)
>>   	erratum_383_found = true;
>>   }
>>
>> +static void svm_init_osvw(struct kvm_vcpu *vcpu)
>> +{
>> +	/*
>> +	 * Guests should see errata 400 and 415 as fixed (assuming that
>> +	 * HLT and IO instructions are intercepted).
>> +	 */
>> +	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
>> +	vcpu->arch.osvw.status = osvw_status & ~(6ULL);
>
> Do you really want to expose the hosts osvw_status and osvw_len? If
> only exposure of 400 and 415 as fixed is necessary, then dealing with
> migration is simpler (that is, you control what workarounds are applied
> in the guest instead of making it dependent on particular hosts).

I do think we should (selectively) expose osvw information to the host. 
As of today we have 4 errata described by these bits. Two of them (400 
and 415) don't need to be seen by the guest and the patch would mask 
them off. As for the other two (errata 383 and 298) --- the guest should 
be aware of them and the patch passes them through.

Since osvw_len is initialized to 4 and cannot become larger no other 
bits will be passed to guests until we update the initial value (by a 
future patch).

If we are concerned about migration we can always overwrite 
vcpu->arch.osvw registers from userspace when creating a guest.

>
>> +	/* Mark erratum 298 as present */
>> +	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
>> +		vcpu->arch.osvw.status |= 1;
>> +}
>
> Why is it necessary to explicitly do this? Please add documentation.

That's because we may have bumped vcpu->arch.osvw.length to 3 in order 
to signal the guest that 400 and 415 are fixed. By doing this we are 
also telling the guest that it can rely on state of bit0.

If we leave bit0 as 0 the guest will assume that 298 is fixed. However, 
if host's osvw_length is 0 it means that the physical HW *may* still be 
affected. So we take conservative approach and tell the guest that it 
should work around 298.

I'll add a comment to that effect.

>
>> +	case MSR_AMD64_OSVW_ID_LENGTH:
>> +		if (!guest_cpuid_has_osvw(vcpu))
>> +			return 1;
>> +		vcpu->arch.osvw.length = data;
>> +		break;
>> +	case MSR_AMD64_OSVW_STATUS:
>> +		if (!guest_cpuid_has_osvw(vcpu))
>> +			return 1;
>> +		vcpu->arch.osvw.status = data;
>> +		break;
>
> If writes are allowed, it is necessary to migrate this.

Not sure I understand what you mean here. Isn't vcpu->arch state 
migrated with the guest?

Thanks.
-boris


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2] KVM: SVM: Add support for AMD's OSVW feature in guests
  2012-01-05 18:37   ` Boris Ostrovsky
@ 2012-01-06 10:49     ` Marcelo Tosatti
  0 siblings, 0 replies; 4+ messages in thread
From: Marcelo Tosatti @ 2012-01-06 10:49 UTC (permalink / raw)
  To: Boris Ostrovsky; +Cc: Boris Ostrovsky, avi, joerg.roedel, kvm, linux-kernel

On Thu, Jan 05, 2012 at 01:37:36PM -0500, Boris Ostrovsky wrote:
> On 01/05/12 06:20, Marcelo Tosatti wrote:
> >On Tue, Jan 03, 2012 at 11:38:13PM -0500, Boris Ostrovsky wrote:
> >>
> >>diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> >>index e32243e..b19769d 100644
> >>--- a/arch/x86/kvm/svm.c
> >>+++ b/arch/x86/kvm/svm.c
> >>@@ -110,6 +110,13 @@ struct nested_state {
> >>  #define MSRPM_OFFSETS	16
> >>  static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
> >>
> >>+/*
> >>+ * Set osvw_len to higher value when updated Revision Guides
> >>+ * are published and we know what the new status bits are
> >>+ */
> >>+static uint64_t osvw_len = 4, osvw_status;
> >>+static DEFINE_SPINLOCK(svm_lock);
> >
> >No need for this lock, operation already serialized by kvm_lock.
> 
> Will remove the lock.
> 
> >
> >>  struct vcpu_svm {
> >>  	struct kvm_vcpu vcpu;
> >>  	struct vmcb *vmcb;
> >>@@ -556,6 +563,20 @@ static void svm_init_erratum_383(void)
> >>  	erratum_383_found = true;
> >>  }
> >>
> >>+static void svm_init_osvw(struct kvm_vcpu *vcpu)
> >>+{
> >>+	/*
> >>+	 * Guests should see errata 400 and 415 as fixed (assuming that
> >>+	 * HLT and IO instructions are intercepted).
> >>+	 */
> >>+	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
> >>+	vcpu->arch.osvw.status = osvw_status & ~(6ULL);
> >
> >Do you really want to expose the hosts osvw_status and osvw_len? If
> >only exposure of 400 and 415 as fixed is necessary, then dealing with
> >migration is simpler (that is, you control what workarounds are applied
> >in the guest instead of making it dependent on particular hosts).
> 
> I do think we should (selectively) expose osvw information to the
> host. As of today we have 4 errata described by these bits. Two of
> them (400 and 415) don't need to be seen by the guest and the patch
> would mask them off. As for the other two (errata 383 and 298) ---
> the guest should be aware of them and the patch passes them through.
> 
> Since osvw_len is initialized to 4 and cannot become larger no other
> bits will be passed to guests until we update the initial value (by
> a future patch).

OK.

> If we are concerned about migration we can always overwrite
> vcpu->arch.osvw registers from userspace when creating a guest.
> 
> >
> >>+	/* Mark erratum 298 as present */
> >>+	if (osvw_len == 0&&  boot_cpu_data.x86 == 0x10)
> >>+		vcpu->arch.osvw.status |= 1;
> >>+}
> >
> >Why is it necessary to explicitly do this? Please add documentation.
> 
> That's because we may have bumped vcpu->arch.osvw.length to 3 in
> order to signal the guest that 400 and 415 are fixed. By doing this
> we are also telling the guest that it can rely on state of bit0.
> 
> If we leave bit0 as 0 the guest will assume that 298 is fixed.
> However, if host's osvw_length is 0 it means that the physical HW
> *may* still be affected. So we take conservative approach and tell
> the guest that it should work around 298.
> 
> I'll add a comment to that effect.

OK thanks.

> >
> >>+	case MSR_AMD64_OSVW_ID_LENGTH:
> >>+		if (!guest_cpuid_has_osvw(vcpu))
> >>+			return 1;
> >>+		vcpu->arch.osvw.length = data;
> >>+		break;
> >>+	case MSR_AMD64_OSVW_STATUS:
> >>+		if (!guest_cpuid_has_osvw(vcpu))
> >>+			return 1;
> >>+		vcpu->arch.osvw.status = data;
> >>+		break;
> >
> >If writes are allowed, it is necessary to migrate this.
> 
> Not sure I understand what you mean here. Isn't vcpu->arch state
> migrated with the guest?

Since the MSR value is setup in the kernel, there is no need to migrate
it, actually.

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2012-01-06 10:50 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-01-04  4:38 [PATCH v2] KVM: SVM: Add support for AMD's OSVW feature in guests Boris Ostrovsky
2012-01-05 11:20 ` Marcelo Tosatti
2012-01-05 18:37   ` Boris Ostrovsky
2012-01-06 10:49     ` Marcelo Tosatti

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).