public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH][UPDATE] shortcut for lightweight VM Exit
@ 2007-04-30  7:01 Dong, Eddie
       [not found] ` <10EA09EFD8728347A513008B6B0DA77A015EDDA8-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Dong, Eddie @ 2007-04-30  7:01 UTC (permalink / raw)
  To: kvm-devel


[-- Attachment #1.1: Type: text/plain, Size: 4131 bytes --]

This patch provides shortcut handling for lightweight VM Exits, which
can boost kernel-build (KB) performance by 11% under an FC5 guest. 
 
Any comments?
Thx,eddie
 
 
Signed-off-by:  Yaozu(Eddie) Dong Eddie.Dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org

 
 
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 41634fd..11eb25e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -251,6 +251,7 @@ struct kvm_stat {
  u32 halt_exits;
  u32 request_irq_exits;
  u32 irq_exits;
+ u32 light_exits;
 };
 
 struct kvm_vcpu {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4c5b8db..0945c7f 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -71,6 +71,7 @@ static struct kvm_stats_debugfs_item {
  { "halt_exits", STAT_OFFSET(halt_exits) },
  { "request_irq", STAT_OFFSET(request_irq_exits) },
  { "irq_exits", STAT_OFFSET(irq_exits) },
+ { "light_exits", STAT_OFFSET(light_exits) },
  { NULL }
 };
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 74d058e..c279326 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1815,7 +1815,6 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
  int fs_gs_ldt_reload_needed;
  int r;
 
-again:
  /*
   * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
   * allow segment selectors with cpl > 0 or ti == 1.
@@ -1858,6 +1857,7 @@ again:
  }
 #endif
 
+again:
  asm (
   /* Store host registers */
   "pushf \n\t"
@@ -1977,6 +1977,47 @@ again:
   [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
        : "cc", "memory" );
 
+ ++vcpu->stat.exits;
+
+ vcpu->interrupt_window_open =
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+
+ asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+
+ if (fail) {
+  kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+  kvm_run->fail_entry.hardware_entry_failure_reason
+   = vmcs_read32(VM_INSTRUCTION_ERROR);
+  r = 0;
+ } else {
+  /*
+   * Profile KVM exit RIPs:
+   */
+  if (unlikely(prof_on == KVM_PROFILING))
+   profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
+  vcpu->launched = 1;
+  r = kvm_handle_exit(kvm_run, vcpu);
+  if (r > 0) {
+   r = -EINTR;
+   kvm_run->exit_reason = KVM_EXIT_INTR;
+   /* Give scheduler a change to reschedule. */
+   if (signal_pending(current)) {
+    ++vcpu->stat.signal_exits;
+    goto out;
+   }
+
+   if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+    ++vcpu->stat.request_irq_exits;
+    goto out;
+   }
+   if (!need_resched()) {
+    ++vcpu->stat.light_exits;
+    goto again;
+   }
+  }
+ }
+
+out:
  /*
   * Reload segment selectors ASAP. (it's needed for a functional
   * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
@@ -1998,8 +2039,6 @@ again:
 
   reload_tss();
  }
- ++vcpu->stat.exits;
-
 #ifdef CONFIG_X86_64
  if (is_long_mode(vcpu)) {
   save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
@@ -2012,45 +2051,6 @@ again:
   fx_restore(vcpu->host_fx_image);
  }
 
- vcpu->interrupt_window_open =
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
-
- asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
-
- if (fail) {
-  kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-  kvm_run->fail_entry.hardware_entry_failure_reason
-   = vmcs_read32(VM_INSTRUCTION_ERROR);
-  r = 0;
- } else {
-  /*
-   * Profile KVM exit RIPs:
-   */
-  if (unlikely(prof_on == KVM_PROFILING))
-   profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
-  vcpu->launched = 1;
-  r = kvm_handle_exit(kvm_run, vcpu);
-  if (r > 0) {
-   /* Give scheduler a change to reschedule. */
-   if (signal_pending(current)) {
-    ++vcpu->stat.signal_exits;
-    post_kvm_run_save(vcpu, kvm_run);
-    kvm_run->exit_reason = KVM_EXIT_INTR;
-    return -EINTR;
-   }
-
-   if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-    ++vcpu->stat.request_irq_exits;
-    post_kvm_run_save(vcpu, kvm_run);
-    kvm_run->exit_reason = KVM_EXIT_INTR;
-    return -EINTR;
-   }
-
-   kvm_resched(vcpu);
-   goto again;
-  }
- }
-
  post_kvm_run_save(vcpu, kvm_run);
  return r;
 }


[-- Attachment #1.2: Type: text/html, Size: 6824 bytes --]

[-- Attachment #2: lightweight3.patch --]
[-- Type: application/octet-stream, Size: 3730 bytes --]

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 41634fd..11eb25e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -251,6 +251,7 @@ struct kvm_stat {
 	u32 halt_exits;
 	u32 request_irq_exits;
 	u32 irq_exits;
+	u32 light_exits;
 };
 
 struct kvm_vcpu {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4c5b8db..0945c7f 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -71,6 +71,7 @@ static struct kvm_stats_debugfs_item {
 	{ "halt_exits", STAT_OFFSET(halt_exits) },
 	{ "request_irq", STAT_OFFSET(request_irq_exits) },
 	{ "irq_exits", STAT_OFFSET(irq_exits) },
+	{ "light_exits", STAT_OFFSET(light_exits) },
 	{ NULL }
 };
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 74d058e..c279326 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1815,7 +1815,6 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int fs_gs_ldt_reload_needed;
 	int r;
 
-again:
 	/*
 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
@@ -1858,6 +1857,7 @@ again:
 	}
 #endif
 
+again:
 	asm (
 		/* Store host registers */
 		"pushf \n\t"
@@ -1977,6 +1977,47 @@ again:
 		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
 	      : "cc", "memory" );
 
+	++vcpu->stat.exits;
+
+	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+
+	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+
+	if (fail) {
+		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		kvm_run->fail_entry.hardware_entry_failure_reason
+			= vmcs_read32(VM_INSTRUCTION_ERROR);
+		r = 0;
+	} else {
+		/*
+		 * Profile KVM exit RIPs:
+		 */
+		if (unlikely(prof_on == KVM_PROFILING))
+			profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
+		vcpu->launched = 1;
+		r = kvm_handle_exit(kvm_run, vcpu);
+		if (r > 0) {
+			r = -EINTR;
+			kvm_run->exit_reason = KVM_EXIT_INTR;
+			/* Give scheduler a change to reschedule. */
+			if (signal_pending(current)) {
+				++vcpu->stat.signal_exits;
+				goto out;
+			}
+
+			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+				++vcpu->stat.request_irq_exits;
+				goto out;
+			}
+			if (!need_resched()) {
+				++vcpu->stat.light_exits;
+				goto again;
+			}
+		}
+	}
+
+out:
 	/*
 	 * Reload segment selectors ASAP. (it's needed for a functional
 	 * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
@@ -1998,8 +2039,6 @@ again:
 
 		reload_tss();
 	}
-	++vcpu->stat.exits;
-
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
 		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
@@ -2012,45 +2051,6 @@ again:
 		fx_restore(vcpu->host_fx_image);
 	}
 
-	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
-
-	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
-
-	if (fail) {
-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-		kvm_run->fail_entry.hardware_entry_failure_reason
-			= vmcs_read32(VM_INSTRUCTION_ERROR);
-		r = 0;
-	} else {
-		/*
-		 * Profile KVM exit RIPs:
-		 */
-		if (unlikely(prof_on == KVM_PROFILING))
-			profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
-		vcpu->launched = 1;
-		r = kvm_handle_exit(kvm_run, vcpu);
-		if (r > 0) {
-			/* Give scheduler a change to reschedule. */
-			if (signal_pending(current)) {
-				++vcpu->stat.signal_exits;
-				post_kvm_run_save(vcpu, kvm_run);
-				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
-			}
-
-			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-				++vcpu->stat.request_irq_exits;
-				post_kvm_run_save(vcpu, kvm_run);
-				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
-			}
-
-			kvm_resched(vcpu);
-			goto again;
-		}
-	}
-
 	post_kvm_run_save(vcpu, kvm_run);
 	return r;
 }

[-- Attachment #3: Type: text/plain, Size: 286 bytes --]

-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

[-- Attachment #4: Type: text/plain, Size: 186 bytes --]

_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH][UPDATE] shortcut for lightweight VM Exit
       [not found] ` <10EA09EFD8728347A513008B6B0DA77A015EDDA8-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-04-30  7:49   ` Avi Kivity
       [not found]     ` <46359F90.9010306-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Avi Kivity @ 2007-04-30  7:49 UTC (permalink / raw)
  To: Dong, Eddie; +Cc: kvm-devel

Dong, Eddie wrote:
> This patch provides short cut handling for light weight VM Exit, which 
> can boost KB performance 11% under FC5 guest.
> Any comments?

While the patch looks good, it kills FC6 x86_64 boot with a double 
fault. Guest kernel is 2.6.18-1.2798. dmesg says

inject_page_fault: double fault 0x7fff79b58fd8 @ 0xffffffff8025cca1

I've reproduced this with the your patch applied on top of 
8dfdb0d81fb9e858c14e03fd5e007b20167cd065, which is before my mmio 
optimization that caused another regression, to make sure it is not 
exposing a bug in my patch.

-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH][UPDATE] shortcut for lightweight VM Exit
       [not found]     ` <46359F90.9010306-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-04-30 11:23       ` Dong, Eddie
       [not found]         ` <10EA09EFD8728347A513008B6B0DA77A015EDE49-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Dong, Eddie @ 2007-04-30 11:23 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm-devel

[-- Attachment #1: Type: text/plain, Size: 955 bytes --]

Avi Kivity wrote:
> Dong, Eddie wrote:
>> This patch provides short cut handling for light weight VM Exit,
>> which can boost KB performance 11% under FC5 guest.
>> Any comments?
> 
> While the patch looks good, it kills FC6 x86_64 boot with a double
> fault. Guest kernel is 2.6.18-1.2798. dmesg says
> 
> inject_page_fault: double fault 0x7fff79b58fd8 @ 0xffffffff8025cca1
> 
I think I know the reason: after this patch, a SYSCALL_MASK & LSTAR
update in the guest needs to update the physical-side MSR as well.
With this update in vmx_set_msr, I can boot a RHEL5 64-bit guest, but
have had no time to check the detailed performance gain yet. (Will
report after the May Day national holiday, 5/1--5/8, in the PRC.)

BTW, I have another patch in hand to further reduce MSR save/restore
and thus improve performance more for lightweight VM Exits. Based on my
observation of an FC5 32-bit guest, 93% of VM Exits fall into the
lightweight path.

Thx, eddie

[-- Attachment #2: lightweight4.patch --]
[-- Type: application/octet-stream, Size: 4149 bytes --]

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 41634fd..11eb25e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -251,6 +251,7 @@ struct kvm_stat {
 	u32 halt_exits;
 	u32 request_irq_exits;
 	u32 irq_exits;
+	u32 light_exits;
 };
 
 struct kvm_vcpu {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4c5b8db..0945c7f 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -71,6 +71,7 @@ static struct kvm_stats_debugfs_item {
 	{ "halt_exits", STAT_OFFSET(halt_exits) },
 	{ "request_irq", STAT_OFFSET(request_irq_exits) },
 	{ "irq_exits", STAT_OFFSET(irq_exits) },
+	{ "light_exits", STAT_OFFSET(light_exits) },
 	{ NULL }
 };
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 74d058e..10ff0c3 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -478,6 +478,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_GS_BASE:
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
+	case MSR_LSTAR:
+	case MSR_SYSCALL_MASK:
+		msr = find_msr_entry(vcpu, msr_index);
+		if (msr) {
+			msr->data = data;
+		}
+		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
 		vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -1815,7 +1823,6 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int fs_gs_ldt_reload_needed;
 	int r;
 
-again:
 	/*
 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
@@ -1858,6 +1865,7 @@ again:
 	}
 #endif
 
+again:
 	asm (
 		/* Store host registers */
 		"pushf \n\t"
@@ -1977,6 +1985,47 @@ again:
 		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
 	      : "cc", "memory" );
 
+	++vcpu->stat.exits;
+
+	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+
+	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+
+	if (fail) {
+		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		kvm_run->fail_entry.hardware_entry_failure_reason
+			= vmcs_read32(VM_INSTRUCTION_ERROR);
+		r = 0;
+	} else {
+		/*
+		 * Profile KVM exit RIPs:
+		 */
+		if (unlikely(prof_on == KVM_PROFILING))
+			profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
+		vcpu->launched = 1;
+		r = kvm_handle_exit(kvm_run, vcpu);
+		if (r > 0) {
+			r = -EINTR;
+			kvm_run->exit_reason = KVM_EXIT_INTR;
+			/* Give scheduler a change to reschedule. */
+			if (signal_pending(current)) {
+				++vcpu->stat.signal_exits;
+				goto out;
+			}
+
+			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+				++vcpu->stat.request_irq_exits;
+				goto out;
+			}
+			if (!need_resched()) {
+				++vcpu->stat.light_exits;
+				goto again;
+			}
+		}
+	}
+
+out:
 	/*
 	 * Reload segment selectors ASAP. (it's needed for a functional
 	 * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
@@ -1998,8 +2047,6 @@ again:
 
 		reload_tss();
 	}
-	++vcpu->stat.exits;
-
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
 		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
@@ -2012,45 +2059,6 @@ again:
 		fx_restore(vcpu->host_fx_image);
 	}
 
-	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
-
-	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
-
-	if (fail) {
-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-		kvm_run->fail_entry.hardware_entry_failure_reason
-			= vmcs_read32(VM_INSTRUCTION_ERROR);
-		r = 0;
-	} else {
-		/*
-		 * Profile KVM exit RIPs:
-		 */
-		if (unlikely(prof_on == KVM_PROFILING))
-			profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
-		vcpu->launched = 1;
-		r = kvm_handle_exit(kvm_run, vcpu);
-		if (r > 0) {
-			/* Give scheduler a change to reschedule. */
-			if (signal_pending(current)) {
-				++vcpu->stat.signal_exits;
-				post_kvm_run_save(vcpu, kvm_run);
-				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
-			}
-
-			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-				++vcpu->stat.request_irq_exits;
-				post_kvm_run_save(vcpu, kvm_run);
-				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
-			}
-
-			kvm_resched(vcpu);
-			goto again;
-		}
-	}
-
 	post_kvm_run_save(vcpu, kvm_run);
 	return r;
 }

[-- Attachment #3: Type: text/plain, Size: 286 bytes --]

-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

[-- Attachment #4: Type: text/plain, Size: 186 bytes --]

_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH][UPDATE] shortcut for lightweight VM Exit
       [not found]         ` <10EA09EFD8728347A513008B6B0DA77A015EDE49-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-04-30 13:10           ` Avi Kivity
       [not found]             ` <4635EAD7.1030405-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  2007-05-08  9:59           ` [PATCH][UPDATE] Shortcut MSR save/restore for lightweight VM Exit (was: RE: shortcut for lightweight VM Exit) Dong, Eddie
  1 sibling, 1 reply; 8+ messages in thread
From: Avi Kivity @ 2007-04-30 13:10 UTC (permalink / raw)
  To: Dong, Eddie; +Cc: kvm-devel

Dong, Eddie wrote:
> Avi Kivity wrote:
>   
>> Dong, Eddie wrote:
>>     
>>> This patch provides short cut handling for light weight VM Exit,
>>> which can boost KB performance 11% under FC5 guest.
>>> Any comments?
>>>       
>> While the patch looks good, it kills FC6 x86_64 boot with a double
>> fault. Guest kernel is 2.6.18-1.2798. dmesg says
>>
>> inject_page_fault: double fault 0x7fff79b58fd8 @ 0xffffffff8025cca1
>>
>>     
> I think I know the reason, the SYSCALL_MASK & LSTAR update in guest
> needs to 
> update the physical side MSR after this patch.  With this update in
> vmx_set_msr, 
> I get it up on a RHEL5U 64 bits guest but no time to check the detail
> performance 
> gain yet. (will report after May day national holiday 5/1--5/8 in PRC)
>
> BTW, I have another patch in hand to further reduce MSR save/restore and
> thus 
> improve performance more for lightweight VM Exit. Base on my observation
> for FC5 32 bits
> guest, 93% VM Exit will fall into the lightweight path.
>
>   

I measured 650 cycles saved from a total of 5300 before the patch.

The patch had a bug where it would exit to userspace if need_resched() 
was true.  This can reduce performance on a high context switch scenario 
(many virtual machines).  I fixed that, and also a minor coding style 
issue, and applied.

Enjoy your holiday :)

-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH][UPDATE] shortcut for lightweight VM Exit
       [not found]             ` <4635EAD7.1030405-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-05-08  5:08               ` Dong, Eddie
  0 siblings, 0 replies; 8+ messages in thread
From: Dong, Eddie @ 2007-05-08  5:08 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm-devel

Avi Kivity wrote:
> I measured 650 cycles saved from a total of 5300 before the patch.
> 
> The patch had a bug where it would exit to userspace if need_resched()
> was true.  This can reduce performance on a high context
Avi:
	This is not a must, though it is OK: the OS will automatically do
rescheduling if we let the VMM exit to userspace, which can increase
virtual interrupt injection efficiency.
Removing kvm_resched() can simplify the logic.
thx,eddie

-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH][UPDATE] Shortcut MSR save/restore for lightweight VM Exit (was: RE: shortcut for lightweight VM Exit)
       [not found]         ` <10EA09EFD8728347A513008B6B0DA77A015EDE49-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  2007-04-30 13:10           ` Avi Kivity
@ 2007-05-08  9:59           ` Dong, Eddie
       [not found]             ` <10EA09EFD8728347A513008B6B0DA77A0165CEA9-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  1 sibling, 1 reply; 8+ messages in thread
From: Dong, Eddie @ 2007-05-08  9:59 UTC (permalink / raw)
  To: Dong, Eddie, Avi Kivity; +Cc: kvm-devel

[-- Attachment #1: Type: text/plain, Size: 5182 bytes --]

kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org wrote:
> BTW, I have another patch in hand to further reduce MSR
> save/restore and
> thus
> improve performance more for lightweight VM Exit. Base on my
> observation for FC5 32 bits
> guest, 93% VM Exit will fall into the lightweight path.
> 
This patch further reduces the VM Exit handling cost
for the lightweight VM Exits which account for 93% of VM Exits in the
kernel-build case, assuming a 64-bit OS behaves similarly to 32-bit. On
my old machine, I saw a 20% kernel-build performance increase within a
64-bit RHEL5 guest, and flat results for a 32-bit FC5 guest.
	There is still some room for improvement here, but this one
focuses on the basic MSR save/restore framework only for now, leaving
optimization of specific MSRs like GS_BASE etc. for the future.
thx,eddie

Signed-off-by:  Yaozu(Eddie) Dong Eddie.Dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org

against 5cf48c367dec74ba8553c53ed332cd075fa38b88


commit a7294eae555b7d42f7e44b8d7955becad2feebf8
Author: root <root@vt32-pae.(none)>
Date:   Tue May 8 17:32:24 2007 +0800

    Avoid MSR save/restore for lightweight VM Exit
    
    Signed-off-by:  Yaozu(Eddie) Dong Eddie.Dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 11eb25e..86abf2d 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -285,6 +285,7 @@ struct kvm_vcpu {
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
 	int nmsrs;
+	int sw_save_msrs;
 	struct vmx_msr_entry *guest_msrs;
 	struct vmx_msr_entry *host_msrs;
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 4e04b85..c2d06b5 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -80,23 +80,11 @@ static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
-	MSR_EFER, MSR_K6_STAR,
+	MSR_K6_STAR, MSR_EFER,
 };
+#define NR_HW_SAVE_MSRS	1	/* HW save MSR_EFER */
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
-
 static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK |
INTR_INFO_VECTOR_MASK |
@@ -339,23 +327,19 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu,
unsigned error_code)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-	int nr_skip, nr_good_msrs;
+	int nr_skip;
 
-	if (is_long_mode(vcpu))
-		nr_skip = NR_BAD_MSRS;
-	else
-		nr_skip = NR_64BIT_MSRS;
-	nr_good_msrs = vcpu->nmsrs - nr_skip;
+	vcpu->sw_save_msrs = nr_skip = vcpu->nmsrs - NR_HW_SAVE_MSRS;
 
 	/*
 	 * MSR_K6_STAR is only needed on long mode guests, and only
 	 * if efer.sce is enabled.
 	 */
 	if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-		--nr_good_msrs;
+		--vcpu->sw_save_msrs;
 #ifdef CONFIG_X86_64
 		if (is_long_mode(vcpu) && (vcpu->shadow_efer &
EFER_SCE))
-			++nr_good_msrs;
+			++vcpu->sw_save_msrs;
 #endif
 	}
 
@@ -365,9 +349,9 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
 		    virt_to_phys(vcpu->guest_msrs + nr_skip));
 	vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
 		    virt_to_phys(vcpu->host_msrs + nr_skip));
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2
*/
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2
*/
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2
*/
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, NR_HW_SAVE_MSRS);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, NR_HW_SAVE_MSRS);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, NR_HW_SAVE_MSRS);
 }
 
 /*
@@ -486,7 +470,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32
msr_index, u64 data)
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr)
 			msr->data = data;
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+		load_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
@@ -1218,10 +1202,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->host_msrs[j].reserved = 0;
 		vcpu->host_msrs[j].data = data;
 		vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
-		if (index == MSR_KERNEL_GS_BASE)
-			msr_offset_kernel_gs_base = j;
-#endif
 		++vcpu->nmsrs;
 	}
 
@@ -1861,12 +1841,8 @@ preempted:
 		fx_restore(vcpu->guest_fx_image);
 	}
 
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base,
1);
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-	}
-#endif
+	save_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
+	load_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
 
 again:
 	asm (
@@ -2052,12 +2028,8 @@ out:
 
 		reload_tss();
 	}
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-	}
-#endif
+	save_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
+	load_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
 
 	if (vcpu->fpu_active) {
 		fx_save(vcpu->guest_fx_image);

[-- Attachment #2: lightweight-msr.patch --]
[-- Type: application/octet-stream, Size: 4050 bytes --]

commit a7294eae555b7d42f7e44b8d7955becad2feebf8
Author: root <root@vt32-pae.(none)>
Date:   Tue May 8 17:32:24 2007 +0800

    Avoid MSR save/restore for lightweight VM Exit
    
    Signed-off-by:  Yaozu(Eddie) Dong Eddie.Dong@intel.com

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 11eb25e..86abf2d 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -285,6 +285,7 @@ struct kvm_vcpu {
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
 	int nmsrs;
+	int sw_save_msrs;
 	struct vmx_msr_entry *guest_msrs;
 	struct vmx_msr_entry *host_msrs;
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 4e04b85..c2d06b5 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -80,23 +80,11 @@ static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
-	MSR_EFER, MSR_K6_STAR,
+	MSR_K6_STAR, MSR_EFER,
 };
+#define NR_HW_SAVE_MSRS	1	/* HW save MSR_EFER */
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
-
 static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -339,23 +327,19 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-	int nr_skip, nr_good_msrs;
+	int nr_skip;
 
-	if (is_long_mode(vcpu))
-		nr_skip = NR_BAD_MSRS;
-	else
-		nr_skip = NR_64BIT_MSRS;
-	nr_good_msrs = vcpu->nmsrs - nr_skip;
+	vcpu->sw_save_msrs = nr_skip = vcpu->nmsrs - NR_HW_SAVE_MSRS;
 
 	/*
 	 * MSR_K6_STAR is only needed on long mode guests, and only
 	 * if efer.sce is enabled.
 	 */
 	if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-		--nr_good_msrs;
+		--vcpu->sw_save_msrs;
 #ifdef CONFIG_X86_64
 		if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
-			++nr_good_msrs;
+			++vcpu->sw_save_msrs;
 #endif
 	}
 
@@ -365,9 +349,9 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
 		    virt_to_phys(vcpu->guest_msrs + nr_skip));
 	vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
 		    virt_to_phys(vcpu->host_msrs + nr_skip));
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, NR_HW_SAVE_MSRS);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, NR_HW_SAVE_MSRS);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, NR_HW_SAVE_MSRS);
 }
 
 /*
@@ -486,7 +470,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr)
 			msr->data = data;
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+		load_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
@@ -1218,10 +1202,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->host_msrs[j].reserved = 0;
 		vcpu->host_msrs[j].data = data;
 		vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
-		if (index == MSR_KERNEL_GS_BASE)
-			msr_offset_kernel_gs_base = j;
-#endif
 		++vcpu->nmsrs;
 	}
 
@@ -1861,12 +1841,8 @@ preempted:
 		fx_restore(vcpu->guest_fx_image);
 	}
 
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-	}
-#endif
+	save_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
+	load_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
 
 again:
 	asm (
@@ -2052,12 +2028,8 @@ out:
 
 		reload_tss();
 	}
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-	}
-#endif
+	save_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
+	load_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
 
 	if (vcpu->fpu_active) {
 		fx_save(vcpu->guest_fx_image);

[-- Attachment #3: Type: text/plain, Size: 286 bytes --]

-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

[-- Attachment #4: Type: text/plain, Size: 186 bytes --]

_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH][UPDATE] Shortcut MSR save/restore for lightweight VM Exit
       [not found]             ` <10EA09EFD8728347A513008B6B0DA77A0165CEA9-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-05-08 11:15               ` Avi Kivity
       [not found]                 ` <46405BC6.1030001-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Avi Kivity @ 2007-05-08 11:15 UTC (permalink / raw)
  To: Dong, Eddie; +Cc: kvm-devel

Dong, Eddie wrote:
> kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org wrote:
>   
>> BTW, I have another patch in hand to further reduce MSR
>> save/restore and
>> thus
>> improve performance more for lightweight VM Exit. Base on my
>> observation for FC5 32 bits
>> guest, 93% VM Exit will fall into the lightweight path.
>>
>>     
> This patch reduce the VM Exit handling cost continuously
> for those lightweight VM Exit which occupies 93% of VM Exit in
> KB case if 64 bits OS has similar situation with 32 bits. In my old 
> machine, I saw 20% performance increasement of KB within 64 bits 
> RHEL5 guest and flat for 32bits FC5.
> 	There are still some room to improvment here, but this one
> focus on basic MSR save/restore framework only for now and leave
>  future to opitmize specific MSRs like GS_BASE etc.
> thx,eddie
>
> Signed-off-by:  Yaozu(Eddie) Dong Eddie.Dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org
>
> against 5cf48c367dec74ba8553c53ed332cd075fa38b88
>
>   

Much has changed.  Please rebase against HEAD.

Also, there have been a lot of regressions with the msr code.  Please 
test on i386 and on Core Duo i386 (which is a little different) in 
addition to the regular x86_64.

> commit a7294eae555b7d42f7e44b8d7955becad2feebf8
> Author: root <root@vt32-pae.(none)>
> Date:   Tue May 8 17:32:24 2007 +0800
>
>     Avoid MSR save/restore for lightweight VM Exit
>     
>     Signed-off-by:  Yaozu(Eddie) Dong Eddie.Dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org
>
> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
> index 11eb25e..86abf2d 100644
> --- a/drivers/kvm/kvm.h
> +++ b/drivers/kvm/kvm.h
> @@ -285,6 +285,7 @@ struct kvm_vcpu {
>  	u64 apic_base;
>  	u64 ia32_misc_enable_msr;
>  	int nmsrs;
> +	int sw_save_msrs;
>  	struct vmx_msr_entry *guest_msrs;
>  	struct vmx_msr_entry *host_msrs;
>  
> diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
> index 4e04b85..c2d06b5 100644
> --- a/drivers/kvm/vmx.c
> +++ b/drivers/kvm/vmx.c
> @@ -80,23 +80,11 @@ static const u32 vmx_msr_index[] = {
>  #ifdef CONFIG_X86_64
>  	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
>  #endif
> -	MSR_EFER, MSR_K6_STAR,
> +	MSR_K6_STAR, MSR_EFER,
>  };
> +#define NR_HW_SAVE_MSRS	1	/* HW save MSR_EFER */
>  #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
>   

The code has a comment that MSR_K6_STAR should be last... the comment 
should be removed.

Also, does this mean that software msr saving is faster than hardware 
msr saving?  Will this be true in future processors as well?

>  
>  static inline int is_page_fault(u32 intr_info)
>  {
>  	return (intr_info & (INTR_INFO_INTR_TYPE_MASK |
> INTR_INFO_VECTOR_MASK |
> @@ -339,23 +327,19 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu,
> unsigned error_code)
>   */
>  static void setup_msrs(struct kvm_vcpu *vcpu)
>  {
> -	int nr_skip, nr_good_msrs;
> +	int nr_skip;
>  
> -	if (is_long_mode(vcpu))
> -		nr_skip = NR_BAD_MSRS;
> -	else
> -		nr_skip = NR_64BIT_MSRS;
> -	nr_good_msrs = vcpu->nmsrs - nr_skip;
> +	vcpu->sw_save_msrs = nr_skip = vcpu->nmsrs - NR_HW_SAVE_MSRS;
>   

One assignment per statement please.

 


-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH][UPDATE] Shortcut MSR save/restore for lightweight VM Exit
       [not found]                 ` <46405BC6.1030001-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-05-08 11:51                   ` Dong, Eddie
  0 siblings, 0 replies; 8+ messages in thread
From: Dong, Eddie @ 2007-05-08 11:51 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm-devel

Avi Kivity wrote:
> Dong, Eddie wrote:
>> This patch reduce the VM Exit handling cost continuously
>> for those lightweight VM Exit which occupies 93% of VM Exit in
>> KB case if 64 bits OS has similar situation with 32 bits. In my old
>> machine, I saw 20% performance increasement of KB within 64 bits
>> RHEL5 guest and flat for 32bits FC5.
>> 	There are still some room to improvment here, but this one
>> focus on basic MSR save/restore framework only for now and leave
>>  future to opitmize specific MSRs like GS_BASE etc.
>> thx,eddie
>> 
>> Signed-off-by:  Yaozu(Eddie) Dong Eddie.Dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org
>> 
>> against 5cf48c367dec74ba8553c53ed332cd075fa38b88
>> 
>> 
> 
> Much has changed.  Please rebase against HEAD.
> 
> Also, there have been a lot of regressions with the msr code.  Please

The previous MSR optimization patch only focused on the heavyweight VM exit
path; with 5cf48c367dec74ba8553c53ed332cd075fa38b88, it doesn't cover
the major path.
This patch moves the save/restore for most HW MSRs — which applies to both
the heavyweight and the lightweight VM Exit paths — to a SW path taken only
on heavyweight VM Exit.

> test on i386 and on Core Duo i386 (which is a little different) in
> addition to the regular x86_64.

I have tested this on i386; it behaves exactly the same as before, since the
number of software-saved MSRs is 0 and of HW-saved MSRs is 1. I will test on
Conroe/Woodcrest to see the exact performance gain with KB.

thx,eddie

-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2007-05-08 11:51 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-04-30  7:01 [PATCH][UPDATE] shortcut for lightweight VM Exit Dong, Eddie
     [not found] ` <10EA09EFD8728347A513008B6B0DA77A015EDDA8-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-04-30  7:49   ` Avi Kivity
     [not found]     ` <46359F90.9010306-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-04-30 11:23       ` Dong, Eddie
     [not found]         ` <10EA09EFD8728347A513008B6B0DA77A015EDE49-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-04-30 13:10           ` Avi Kivity
     [not found]             ` <4635EAD7.1030405-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-05-08  5:08               ` Dong, Eddie
2007-05-08  9:59           ` [PATCH][UPDATE] Shortcut MSR save/restore for lightweight VM Exit (was: RE: shortcut for lightweight VM Exit) Dong, Eddie
     [not found]             ` <10EA09EFD8728347A513008B6B0DA77A0165CEA9-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-05-08 11:15               ` [PATCH][UPDATE] Shortcut MSR save/restore for lightweight VM Exit Avi Kivity
     [not found]                 ` <46405BC6.1030001-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-05-08 11:51                   ` Dong, Eddie

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox