kvm.vger.kernel.org archive mirror
* [PATCH 0/3] Prepare kvm for lto
@ 2012-09-12 14:50 Avi Kivity
  2012-09-12 14:50 ` [PATCH 1/3] KVM: VMX: Make lto-friendly Avi Kivity
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Avi Kivity @ 2012-09-12 14:50 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm, Andi Kleen

vmx.c has an lto-unfriendly bit, fix it up.

While there, clean up our asm code.

Avi Kivity (3):
  KVM: VMX: Make lto-friendly
  KVM: VMX: Make use of asm.h
  KVM: SVM: Make use of asm.h

 arch/x86/kvm/svm.c | 46 +++++++++++++----------------
 arch/x86/kvm/vmx.c | 85 +++++++++++++++++++++++++-----------------------------
 2 files changed, 60 insertions(+), 71 deletions(-)

-- 
1.7.11.3



* [PATCH 1/3] KVM: VMX: Make lto-friendly
  2012-09-12 14:50 [PATCH 0/3] Prepare kvm for lto Avi Kivity
@ 2012-09-12 14:50 ` Avi Kivity
  2012-09-12 14:50 ` [PATCH 2/3] KVM: VMX: Make use of asm.h Avi Kivity
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 9+ messages in thread
From: Avi Kivity @ 2012-09-12 14:50 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm, Andi Kleen

LTO (link-time optimization) doesn't like local labels to be referred to
from a different function, since the two functions may be built in separate
compilation units.  Use an external variable instead.
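
For illustration, a minimal sketch of the two patterns, with hypothetical label,
symbol, and function names (and .quad assuming a 64-bit build; the kernel uses
_ASM_PTR to stay bitness-neutral):

  /* LTO-unfriendly: a local assembler label defined in one function's asm is
   * referenced by name from another function's asm.  This only assembles if
   * both functions end up in the same assembly unit. */
  static unsigned long get_entry_address(void)
  {
          unsigned long addr;
          asm("mov $.Lentry_point, %0" : "=r"(addr));
          return addr;
  }

  static void define_entry_point(void)
  {
          asm(".Lentry_point: nop");
  }

  /* LTO-friendly: publish the address through a real symbol in .rodata and
   * read it from C as ordinary data (the .global is what makes the symbol
   * visible across units; see the follow-up discussion later in this thread). */
  extern const unsigned long entry_address;

  static void define_entry_point_lto(void)
  {
          asm("1: nop \n\t"
              ".pushsection .rodata \n\t"
              ".global entry_address \n\t"
              "entry_address: .quad 1b \n\t"
              ".popsection");
  }
  /* ...and get_entry_address() reduces to "return entry_address;". */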

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/kvm/vmx.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d62b413..0302c0f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -127,6 +127,8 @@
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
+extern const ulong vmx_return;
+
 #define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
@@ -3724,8 +3726,7 @@ static void vmx_set_constant_host_state(void)
 	native_store_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 
-	asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
-	vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
+	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
 
 	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
 	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
@@ -6276,11 +6277,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
 
 		/* Enter guest mode */
-		"jne .Llaunched \n\t"
+		"jne 1f \n\t"
 		__ex(ASM_VMX_VMLAUNCH) "\n\t"
-		"jmp .Lkvm_vmx_return \n\t"
-		".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
-		".Lkvm_vmx_return: "
+		"jmp 2f \n\t"
+		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+		"2: "
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%"R"sp) \n\t"
 		"pop %0 \n\t"
@@ -6306,6 +6307,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 		"pop  %%"R"bp; pop  %%"R"dx \n\t"
 		"setbe %c[fail](%0) \n\t"
+		".pushsection .rodata \n\t"
+		"vmx_return: " _ASM_PTR " 2b \n\t"
+		".popsection"
 	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
 		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
 		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
-- 
1.7.11.3



* [PATCH 2/3] KVM: VMX: Make use of asm.h
  2012-09-12 14:50 [PATCH 0/3] Prepare kvm for lto Avi Kivity
  2012-09-12 14:50 ` [PATCH 1/3] KVM: VMX: Make lto-friendly Avi Kivity
@ 2012-09-12 14:50 ` Avi Kivity
  2012-09-12 14:50 ` [PATCH 3/3] KVM: SVM: " Avi Kivity
  2012-09-12 19:17 ` [PATCH 0/3] Prepare kvm for lto Andi Kleen
  3 siblings, 0 replies; 9+ messages in thread
From: Avi Kivity @ 2012-09-12 14:50 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm, Andi Kleen

Use macros for bitness-insensitive register names, instead of
rolling our own.
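
Roughly, the <asm/asm.h> macros in play look like this (paraphrased; the real
header builds them via __ASM_SEL/__ASM_FORM and adds whitespace around each
expansion, and read_sp() below is just a made-up usage example):

  #ifdef CONFIG_X86_32
  # define _ASM_AX        "eax"
  # define _ASM_SP        "esp"
  # define __ASM_SIZE(op) #op "l"         /* popl, pushl, ... */
  # define _ASM_PTR       ".long"
  #else
  # define _ASM_AX        "rax"
  # define _ASM_SP        "rsp"
  # define __ASM_SIZE(op) #op "q"         /* popq, pushq, ... */
  # define _ASM_PTR       ".quad"
  #endif

  /* Usage reads the same on 32-bit and 64-bit: */
  static inline unsigned long read_sp(void)
  {
          unsigned long sp;
          asm("mov %%" _ASM_SP ", %0" : "=r"(sp));
          return sp;
  }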

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/kvm/vmx.c | 69 ++++++++++++++++++++++++------------------------------
 1 file changed, 30 insertions(+), 39 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0302c0f..04a7334 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6184,14 +6184,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host);
 }
 
-#ifdef CONFIG_X86_64
-#define R "r"
-#define Q "q"
-#else
-#define R "e"
-#define Q "l"
-#endif
-
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6240,30 +6232,30 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */
-		"push %%"R"dx; push %%"R"bp;"
-		"push %%"R"cx \n\t" /* placeholder for guest rcx */
-		"push %%"R"cx \n\t"
-		"cmp %%"R"sp, %c[host_rsp](%0) \n\t"
+		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
+		"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
+		"push %%" _ASM_CX " \n\t"
+		"cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
 		"je 1f \n\t"
-		"mov %%"R"sp, %c[host_rsp](%0) \n\t"
+		"mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
 		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
 		"1: \n\t"
 		/* Reload cr2 if changed */
-		"mov %c[cr2](%0), %%"R"ax \n\t"
-		"mov %%cr2, %%"R"dx \n\t"
-		"cmp %%"R"ax, %%"R"dx \n\t"
+		"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
+		"mov %%cr2, %%" _ASM_DX " \n\t"
+		"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
 		"je 2f \n\t"
-		"mov %%"R"ax, %%cr2 \n\t"
+		"mov %%" _ASM_AX", %%cr2 \n\t"
 		"2: \n\t"
 		/* Check if vmlaunch of vmresume is needed */
 		"cmpl $0, %c[launched](%0) \n\t"
 		/* Load guest registers.  Don't clobber flags. */
-		"mov %c[rax](%0), %%"R"ax \n\t"
-		"mov %c[rbx](%0), %%"R"bx \n\t"
-		"mov %c[rdx](%0), %%"R"dx \n\t"
-		"mov %c[rsi](%0), %%"R"si \n\t"
-		"mov %c[rdi](%0), %%"R"di \n\t"
-		"mov %c[rbp](%0), %%"R"bp \n\t"
+		"mov %c[rax](%0), %%" _ASM_AX " \n\t"
+		"mov %c[rbx](%0), %%" _ASM_BX " \n\t"
+		"mov %c[rdx](%0), %%" _ASM_DX " \n\t"
+		"mov %c[rsi](%0), %%" _ASM_SI " \n\t"
+		"mov %c[rdi](%0), %%" _ASM_DI " \n\t"
+		"mov %c[rbp](%0), %%" _ASM_BP " \n\t"
 #ifdef CONFIG_X86_64
 		"mov %c[r8](%0),  %%r8  \n\t"
 		"mov %c[r9](%0),  %%r9  \n\t"
@@ -6274,7 +6266,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %c[r14](%0), %%r14 \n\t"
 		"mov %c[r15](%0), %%r15 \n\t"
 #endif
-		"mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
+		"mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
 
 		/* Enter guest mode */
 		"jne 1f \n\t"
@@ -6283,15 +6275,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
 		"2: "
 		/* Save guest registers, load host registers, keep flags */
-		"mov %0, %c[wordsize](%%"R"sp) \n\t"
+		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
-		"mov %%"R"ax, %c[rax](%0) \n\t"
-		"mov %%"R"bx, %c[rbx](%0) \n\t"
-		"pop"Q" %c[rcx](%0) \n\t"
-		"mov %%"R"dx, %c[rdx](%0) \n\t"
-		"mov %%"R"si, %c[rsi](%0) \n\t"
-		"mov %%"R"di, %c[rdi](%0) \n\t"
-		"mov %%"R"bp, %c[rbp](%0) \n\t"
+		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
+		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
+		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
+		"mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
+		"mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
+		"mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
+		"mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
 #ifdef CONFIG_X86_64
 		"mov %%r8,  %c[r8](%0) \n\t"
 		"mov %%r9,  %c[r9](%0) \n\t"
@@ -6302,10 +6294,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r14, %c[r14](%0) \n\t"
 		"mov %%r15, %c[r15](%0) \n\t"
 #endif
-		"mov %%cr2, %%"R"ax   \n\t"
-		"mov %%"R"ax, %c[cr2](%0) \n\t"
+		"mov %%cr2, %%" _ASM_AX "   \n\t"
+		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
 
-		"pop  %%"R"bp; pop  %%"R"dx \n\t"
+		"pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
 		"setbe %c[fail](%0) \n\t"
 		".pushsection .rodata \n\t"
 		"vmx_return: " _ASM_PTR " 2b \n\t"
@@ -6334,9 +6326,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
 		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
-		, R"ax", R"bx", R"di", R"si"
 #ifdef CONFIG_X86_64
+		, "rax", "rbx", "rdi", "rsi"
 		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+		, "eax", "ebx", "edi", "esi"
 #endif
 	      );
 
@@ -6388,9 +6382,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_complete_interrupts(vmx);
 }
 
-#undef R
-#undef Q
-
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-- 
1.7.11.3



* [PATCH 3/3] KVM: SVM: Make use of asm.h
  2012-09-12 14:50 [PATCH 0/3] Prepare kvm for lto Avi Kivity
  2012-09-12 14:50 ` [PATCH 1/3] KVM: VMX: Make lto-friendly Avi Kivity
  2012-09-12 14:50 ` [PATCH 2/3] KVM: VMX: Make use of asm.h Avi Kivity
@ 2012-09-12 14:50 ` Avi Kivity
  2012-09-12 19:17 ` [PATCH 0/3] Prepare kvm for lto Andi Kleen
  3 siblings, 0 replies; 9+ messages in thread
From: Avi Kivity @ 2012-09-12 14:50 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm, Andi Kleen

Use macros for bitness-insensitive register names, instead of
rolling our own.
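
Not changed by this patch, but worth spelling out since both this asm block and
the VMX one lean on it: guest registers are addressed with the %c/offsetof
idiom that the _ASM_* names plug into.  A minimal standalone sketch, using a
hypothetical struct rather than vcpu_svm:

  #include <stddef.h>

  struct demo_vcpu {
          unsigned long pad;
          unsigned long rbx;
  };

  static unsigned long load_rbx_field(struct demo_vcpu *v)
  {
          unsigned long val;

          /* %c prints the constant operand bare (no '$'), so the template
           * becomes e.g. "mov 8(%rsi), %rax" after substitution. */
          asm("mov %c[rbx](%[vcpu]), %0"
              : "=r"(val)
              : [vcpu]"r"(v),
                [rbx]"i"(offsetof(struct demo_vcpu, rbx)));
          return val;
  }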

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/kvm/svm.c | 46 ++++++++++++++++++++--------------------------
 1 file changed, 20 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 611c728..818fceb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3782,12 +3782,6 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 	svm_complete_interrupts(svm);
 }
 
-#ifdef CONFIG_X86_64
-#define R "r"
-#else
-#define R "e"
-#endif
-
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3814,13 +3808,13 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 
 	asm volatile (
-		"push %%"R"bp; \n\t"
-		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
-		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
-		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
-		"mov %c[rsi](%[svm]), %%"R"si \n\t"
-		"mov %c[rdi](%[svm]), %%"R"di \n\t"
-		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
+		"push %%" _ASM_BP "; \n\t"
+		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
+		"mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
+		"mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
+		"mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
+		"mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
+		"mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
 #ifdef CONFIG_X86_64
 		"mov %c[r8](%[svm]),  %%r8  \n\t"
 		"mov %c[r9](%[svm]),  %%r9  \n\t"
@@ -3833,20 +3827,20 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 
 		/* Enter guest mode */
-		"push %%"R"ax \n\t"
-		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
+		"push %%" _ASM_AX " \n\t"
+		"mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
 		__ex(SVM_VMLOAD) "\n\t"
 		__ex(SVM_VMRUN) "\n\t"
 		__ex(SVM_VMSAVE) "\n\t"
-		"pop %%"R"ax \n\t"
+		"pop %%" _ASM_AX " \n\t"
 
 		/* Save guest registers, load host registers */
-		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
-		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
-		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
-		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
-		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
-		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
+		"mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
+		"mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
+		"mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
+		"mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
+		"mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
+		"mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
 #ifdef CONFIG_X86_64
 		"mov %%r8,  %c[r8](%[svm]) \n\t"
 		"mov %%r9,  %c[r9](%[svm]) \n\t"
@@ -3857,7 +3851,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r14, %c[r14](%[svm]) \n\t"
 		"mov %%r15, %c[r15](%[svm]) \n\t"
 #endif
-		"pop %%"R"bp"
+		"pop %%" _ASM_BP
 		:
 		: [svm]"a"(svm),
 		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
@@ -3878,9 +3872,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
 #endif
 		: "cc", "memory"
-		, R"bx", R"cx", R"dx", R"si", R"di"
 #ifdef CONFIG_X86_64
+		, "rbx", "rcx", "rdx", "rsi", "rdi"
 		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
+#else
+		, "ebx", "ecx", "edx", "esi", "edi"
 #endif
 		);
 
@@ -3940,8 +3936,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	mark_all_clean(svm->vmcb);
 }
 
-#undef R
-
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-- 
1.7.11.3



* Re: [PATCH 0/3] Prepare kvm for lto
  2012-09-12 14:50 [PATCH 0/3] Prepare kvm for lto Avi Kivity
                   ` (2 preceding siblings ...)
  2012-09-12 14:50 ` [PATCH 3/3] KVM: SVM: " Avi Kivity
@ 2012-09-12 19:17 ` Andi Kleen
  2012-09-13  8:27   ` Avi Kivity
  3 siblings, 1 reply; 9+ messages in thread
From: Andi Kleen @ 2012-09-12 19:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm

On Wed, Sep 12, 2012 at 05:50:41PM +0300, Avi Kivity wrote:
> vmx.c has an lto-unfriendly bit, fix it up.
> 
> While there, clean up our asm code.
> 
> Avi Kivity (3):
>   KVM: VMX: Make lto-friendly
>   KVM: VMX: Make use of asm.h
>   KVM: SVM: Make use of asm.h

Works for me in my LTO build, thanks Avi.
I cannot guarantee I always hit the unit splitting case, but it looks
good so far.

I replaced my patches with yours.

Acked-by: Andi Kleen <ak@linux.intel.com>

-Andi
-- 
ak@linux.intel.com -- Speaking for myself only


* Re: [PATCH 0/3] Prepare kvm for lto
  2012-09-12 19:17 ` [PATCH 0/3] Prepare kvm for lto Andi Kleen
@ 2012-09-13  8:27   ` Avi Kivity
  2012-09-13 15:50     ` Andi Kleen
  0 siblings, 1 reply; 9+ messages in thread
From: Avi Kivity @ 2012-09-13  8:27 UTC (permalink / raw)
  To: Andi Kleen; +Cc: Marcelo Tosatti, kvm

On 09/12/2012 10:17 PM, Andi Kleen wrote:
> On Wed, Sep 12, 2012 at 05:50:41PM +0300, Avi Kivity wrote:
>> vmx.c has an lto-unfriendly bit, fix it up.
>> 
>> While there, clean up our asm code.
>> 
>> Avi Kivity (3):
>>   KVM: VMX: Make lto-friendly
>>   KVM: VMX: Make use of asm.h
>>   KVM: SVM: Make use of asm.h
> 
> Works for me in my LTO build, thanks Avi.
> I cannot guarantee I always hit the unit splitting case, but it looks
> good so far.

Actually I think patch 1 is missing a .global vmx_return.
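
Presumably something on the order of (a sketch, not the actual v2 hunk):

  ".pushsection .rodata \n\t"
  ".global vmx_return \n\t"
  "vmx_return: " _ASM_PTR " 2b \n\t"
  ".popsection"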


-- 
error compiling committee.c: too many arguments to function


* Re: [PATCH 0/3] Prepare kvm for lto
  2012-09-13  8:27   ` Avi Kivity
@ 2012-09-13 15:50     ` Andi Kleen
  0 siblings, 0 replies; 9+ messages in thread
From: Andi Kleen @ 2012-09-13 15:50 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm

On Thu, Sep 13, 2012 at 11:27:43AM +0300, Avi Kivity wrote:
> On 09/12/2012 10:17 PM, Andi Kleen wrote:
> > On Wed, Sep 12, 2012 at 05:50:41PM +0300, Avi Kivity wrote:
> >> vmx.c has an lto-unfriendly bit, fix it up.
> >> 
> >> While there, clean up our asm code.
> >> 
> >> Avi Kivity (3):
> >>   KVM: VMX: Make lto-friendly
> >>   KVM: VMX: Make use of asm.h
> >>   KVM: SVM: Make use of asm.h
> > 
> > Works for me in my LTO build, thanks Avi.
> > I cannot guarantee I always hit the unit splitting case, but it looks
> > good so far.
> 
> Actually I think patch 1 is missing a .global vmx_return.

OK, can you add it please? It always depends on how the LTO partitioner
decides to split the subunits.

I can run it with randconfig in a loop overnight. That's the best way I
know to try to cover these cases.

-Andi


* [PATCH 0/3] Prepare kvm for lto
@ 2012-09-16 12:10 Avi Kivity
  2012-09-17 13:38 ` Marcelo Tosatti
  0 siblings, 1 reply; 9+ messages in thread
From: Avi Kivity @ 2012-09-16 12:10 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm, Andi Kleen

vmx.c has an lto-unfriendly bit, fix it up.

While there, clean up our asm code.

v2: add missing .global in case vmx_return and vmx_set_constant_host_state() become
    separated by lto

Avi Kivity (3):
  KVM: VMX: Make lto-friendly
  KVM: VMX: Make use of asm.h
  KVM: SVM: Make use of asm.h

 arch/x86/kvm/svm.c | 46 +++++++++++++----------------
 arch/x86/kvm/vmx.c | 86 ++++++++++++++++++++++++++----------------------------
 2 files changed, 61 insertions(+), 71 deletions(-)

-- 
1.7.12



* Re: [PATCH 0/3] Prepare kvm for lto
  2012-09-16 12:10 Avi Kivity
@ 2012-09-17 13:38 ` Marcelo Tosatti
  0 siblings, 0 replies; 9+ messages in thread
From: Marcelo Tosatti @ 2012-09-17 13:38 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Andi Kleen

On Sun, Sep 16, 2012 at 03:10:56PM +0300, Avi Kivity wrote:
> vmx.c has an lto-unfriendly bit, fix it up.
> 
> While there, clean up our asm code.
> 
> v2: add missing .global in case vmx_return and vmx_set_constant_host_state() become
>     separated by lto
> 
> Avi Kivity (3):
>   KVM: VMX: Make lto-friendly
>   KVM: VMX: Make use of asm.h
>   KVM: SVM: Make use of asm.h

Applied, thanks.


