* [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2)
@ 2008-10-08  4:17 Sheng Yang
  2008-10-08  4:17 ` [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu Sheng Yang
                   ` (7 more replies)
  0 siblings, 8 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm

Hi, Avi

Here is the updated patchset enabling MTRR/PAT support for KVM; the last
patch enables it for EPT. Shadow paging support will follow later; I am
still doing some cleanup work.

The changes from v1 include bug fixes and a rename of the GUEST_PAT fields
according to the newly published spec. For MTRR, I reuse the host mtrr
struct rather than duplicating that part of the work. This is mostly in
preparation for shadow MTRR/PAT support.

I think last time you mentioned you had a draft patch for extra MSR
save/restore on hand? If you are busy with other things, I can take it over.

Thanks!
--
regards
Yang, Sheng


* [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
@ 2008-10-08  4:17 ` Sheng Yang
  2008-10-08 10:14   ` Avi Kivity
  2008-10-08  4:17 ` [PATCH 2/7] KVM: VMX: Add PAT support for EPT Sheng Yang
                   ` (6 subsequent siblings)
  7 siblings, 1 reply; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Sheng Yang

Use a separate msr_bitmap for each vcpu, in preparation for guest PAT support.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/vmx.c |   53 +++++++++++++++++++++++++++++----------------------
 1 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4556cc3..8c44e37 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -90,6 +90,7 @@ struct vcpu_vmx {
 	} rmode;
 	int vpid;
 	bool emulation_required;
+	struct page *msr_bitmap;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -106,7 +107,6 @@ static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
 
 static struct page *vmx_io_bitmap_a;
 static struct page *vmx_io_bitmap_b;
-static struct page *vmx_msr_bitmap;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -2082,6 +2082,25 @@ static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
 	kunmap(msr_bitmap);
 }
 
+static int setup_msr_bitmap(struct page *msr_bitmap)
+{
+	void *va;
+
+	va = kmap(msr_bitmap);
+	if (!va)
+		return -EINVAL;
+	memset(va, 0xff, PAGE_SIZE);
+	kunmap(msr_bitmap);
+
+	vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE);
+	vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE);
+	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS);
+	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP);
+	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP);
+
+	return 0;
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -2099,8 +2118,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
 	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
-	if (cpu_has_vmx_msr_bitmap())
-		vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+	if (cpu_has_vmx_msr_bitmap()) {
+		setup_msr_bitmap(vmx->msr_bitmap);
+		vmcs_write64(MSR_BITMAP, page_to_phys(vmx->msr_bitmap));
+	}
 
 	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
@@ -3368,6 +3389,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	vmx_free_vmcs(vcpu);
 	kfree(vmx->host_msrs);
 	kfree(vmx->guest_msrs);
+	__free_page(vmx->msr_bitmap);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
@@ -3403,6 +3425,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmcs_clear(vmx->vmcs);
 
+	vmx->msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+	if (!vmx->msr_bitmap)
+		goto free_vmcs;
+
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);
 	err = vmx_vcpu_setup(vmx);
@@ -3524,12 +3550,6 @@ static int __init vmx_init(void)
 		goto out;
 	}
 
-	vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
-	if (!vmx_msr_bitmap) {
-		r = -ENOMEM;
-		goto out1;
-	}
-
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
 	 * delays, but the vmexits simply slow things down).
@@ -3543,21 +3563,11 @@ static int __init vmx_init(void)
 	memset(va, 0xff, PAGE_SIZE);
 	kunmap(vmx_io_bitmap_b);
 
-	va = kmap(vmx_msr_bitmap);
-	memset(va, 0xff, PAGE_SIZE);
-	kunmap(vmx_msr_bitmap);
-
 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
 	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 	if (r)
-		goto out2;
-
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
+		goto out1;
 
 	if (vm_need_ept()) {
 		bypass_guest_pf = 0;
@@ -3577,8 +3587,6 @@ static int __init vmx_init(void)
 
 	return 0;
 
-out2:
-	__free_page(vmx_msr_bitmap);
 out1:
 	__free_page(vmx_io_bitmap_b);
 out:
@@ -3588,7 +3596,6 @@ out:
 
 static void __exit vmx_exit(void)
 {
-	__free_page(vmx_msr_bitmap);
 	__free_page(vmx_io_bitmap_b);
 	__free_page(vmx_io_bitmap_a);
 
-- 
1.5.4.5



* [PATCH 2/7] KVM: VMX: Add PAT support for EPT
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
  2008-10-08  4:17 ` [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu Sheng Yang
@ 2008-10-08  4:17 ` Sheng Yang
  2008-10-08  4:17 ` [PATCH 3/7] x86: Rename mtrr_state struct and macro names Sheng Yang
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Sheng Yang

GUEST_PAT support is a new feature introduced by the Intel Core i7
architecture. With it, the CPU saves/loads the guest and host PAT
automatically, without the need to intercept PAT MSR accesses.
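
(For reference: the IA32_PAT MSR packs eight 8-bit page attribute
entries; the architectural power-on value is 0x0007040600070406, i.e.
WB/WT/UC-/UC twice. With the new VMCS controls the CPU swaps this whole
64-bit value as part of VM entry/exit.)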

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/vmx.c |   22 +++++++++++++++++++---
 arch/x86/kvm/vmx.h |    7 +++++++
 2 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8c44e37..d7207e7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1176,12 +1176,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 #ifdef CONFIG_X86_64
 	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
 #endif
-	opt = 0;
+	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
 				&_vmexit_control) < 0)
 		return -EIO;
 
-	min = opt = 0;
+	min = 0;
+	opt = VM_ENTRY_LOAD_IA32_PAT;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
 				&_vmentry_control) < 0)
 		return -EIO;
@@ -2106,8 +2107,9 @@ static int setup_msr_bitmap(struct page *msr_bitmap)
  */
 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
-	u32 host_sysenter_cs;
+	u32 host_sysenter_cs, msr_low, msr_high;
 	u32 junk;
+	u64 host_pat;
 	unsigned long a;
 	struct descriptor_table dt;
 	int i;
@@ -2196,6 +2198,20 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
 	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
 
+	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
+		rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
+		host_pat = msr_low | ((u64) msr_high << 32);
+		vmcs_write64(HOST_IA32_PAT, host_pat);
+		vmx_disable_intercept_for_msr(vmx->msr_bitmap,
+					      MSR_IA32_CR_PAT);
+	}
+	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+		rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
+		host_pat = msr_low | ((u64) msr_high << 32);
+		/* Write the default value following the host PAT */
+		vmcs_write64(GUEST_IA32_PAT, host_pat);
+	}
+
 	for (i = 0; i < NR_VMX_MSR; ++i) {
 		u32 index = vmx_msr_index[i];
 		u32 data_low, data_high;
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 3e010d2..3ad61dc 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -63,10 +63,13 @@
 
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
 #define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
+#define VM_EXIT_SAVE_IA32_PAT			0x00040000
+#define VM_EXIT_LOAD_IA32_PAT			0x00080000
 
 #define VM_ENTRY_IA32E_MODE                     0x00000200
 #define VM_ENTRY_SMM                            0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
+#define VM_ENTRY_LOAD_IA32_PAT			0x00004000
 
 /* VMCS Encodings */
 enum vmcs_field {
@@ -112,6 +115,8 @@ enum vmcs_field {
 	VMCS_LINK_POINTER_HIGH          = 0x00002801,
 	GUEST_IA32_DEBUGCTL             = 0x00002802,
 	GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
+	GUEST_IA32_PAT			= 0x00002804,
+	GUEST_IA32_PAT_HIGH		= 0x00002805,
 	GUEST_PDPTR0                    = 0x0000280a,
 	GUEST_PDPTR0_HIGH               = 0x0000280b,
 	GUEST_PDPTR1                    = 0x0000280c,
@@ -120,6 +125,8 @@ enum vmcs_field {
 	GUEST_PDPTR2_HIGH               = 0x0000280f,
 	GUEST_PDPTR3                    = 0x00002810,
 	GUEST_PDPTR3_HIGH               = 0x00002811,
+	HOST_IA32_PAT			= 0x00002c00,
+	HOST_IA32_PAT_HIGH		= 0x00002c01,
 	PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
 	CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
 	EXCEPTION_BITMAP                = 0x00004004,
-- 
1.5.4.5



* [PATCH 3/7] x86: Rename mtrr_state struct and macro names
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
  2008-10-08  4:17 ` [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu Sheng Yang
  2008-10-08  4:17 ` [PATCH 2/7] KVM: VMX: Add PAT support for EPT Sheng Yang
@ 2008-10-08  4:17 ` Sheng Yang
  2008-10-08  4:17 ` [PATCH 4/7] Export some definition of MTRR Sheng Yang
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Sheng Yang

Prepare for exporting them.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kernel/cpu/mtrr/generic.c |    8 ++++----
 arch/x86/kernel/cpu/mtrr/main.c    |    4 ++--
 arch/x86/kernel/cpu/mtrr/mtrr.h    |    7 ++++---
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cb7d3b6..b9574a6 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -14,9 +14,9 @@
 #include <asm/pat.h>
 #include "mtrr.h"
 
-struct mtrr_state {
-	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
-	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
+struct mtrr_state_type {
+	struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
+	mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
 	unsigned char enabled;
 	unsigned char have_fixed;
 	mtrr_type def_type;
@@ -35,7 +35,7 @@ static struct fixed_range_block fixed_range_blocks[] = {
 };
 
 static unsigned long smp_changes_mask;
-static struct mtrr_state mtrr_state = {};
+static struct mtrr_state_type mtrr_state = {};
 static int mtrr_state_set;
 u64 mtrr_tom2;
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index b117d7f..b796642 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -49,7 +49,7 @@
 
 u32 num_var_ranges = 0;
 
-unsigned int mtrr_usage_table[MAX_VAR_RANGES];
+unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
@@ -574,7 +574,7 @@ struct mtrr_value {
 	unsigned long	lsize;
 };
 
-static struct mtrr_value mtrr_state[MAX_VAR_RANGES];
+static struct mtrr_value mtrr_state[MTRR_MAX_VAR_RANGES];
 
 static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
 {
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 2dc4ec6..9885382 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -11,8 +11,9 @@
 #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
 #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
 
-#define NUM_FIXED_RANGES 88
-#define MAX_VAR_RANGES 256
+#define MTRR_NUM_FIXED_RANGES 88
+#define MTRR_MAX_VAR_RANGES 256
+
 #define MTRRfix64K_00000_MSR 0x250
 #define MTRRfix16K_80000_MSR 0x258
 #define MTRRfix16K_A0000_MSR 0x259
@@ -33,7 +34,7 @@
    an 8 bit field: */
 typedef u8 mtrr_type;
 
-extern unsigned int mtrr_usage_table[MAX_VAR_RANGES];
+extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 
 struct mtrr_ops {
 	u32	vendor;
-- 
1.5.4.5



* [PATCH 4/7] Export some definition of MTRR
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
                   ` (2 preceding siblings ...)
  2008-10-08  4:17 ` [PATCH 3/7] x86: Rename mtrr_state struct and macro names Sheng Yang
@ 2008-10-08  4:17 ` Sheng Yang
  2008-10-08  4:17 ` [PATCH 5/7] KVM: Improve MTRR structure Sheng Yang
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Sheng Yang

So that KVM can reuse the type definitions; they are needed to support
shadow MTRR.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kernel/cpu/mtrr/generic.c |   12 +++---------
 arch/x86/kernel/cpu/mtrr/mtrr.h    |   17 -----------------
 include/asm-x86/mtrr.h             |   25 +++++++++++++++++++++++++
 3 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b9574a6..aa414ab 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -14,14 +14,6 @@
 #include <asm/pat.h>
 #include "mtrr.h"
 
-struct mtrr_state_type {
-	struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
-	mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
-	unsigned char enabled;
-	unsigned char have_fixed;
-	mtrr_type def_type;
-};
-
 struct fixed_range_block {
 	int base_msr; /* start address of an MTRR block */
 	int ranges;   /* number of MTRRs in this block  */
@@ -35,10 +27,12 @@ static struct fixed_range_block fixed_range_blocks[] = {
 };
 
 static unsigned long smp_changes_mask;
-static struct mtrr_state_type mtrr_state = {};
 static int mtrr_state_set;
 u64 mtrr_tom2;
 
+struct mtrr_state_type mtrr_state = {};
+EXPORT_SYMBOL_GPL(mtrr_state);
+
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "mtrr."
 
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 9885382..ffd6040 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -8,12 +8,6 @@
 #define MTRRcap_MSR     0x0fe
 #define MTRRdefType_MSR 0x2ff
 
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
-#define MTRR_NUM_FIXED_RANGES 88
-#define MTRR_MAX_VAR_RANGES 256
-
 #define MTRRfix64K_00000_MSR 0x250
 #define MTRRfix16K_80000_MSR 0x258
 #define MTRRfix16K_A0000_MSR 0x259
@@ -30,10 +24,6 @@
 #define MTRR_CHANGE_MASK_VARIABLE  0x02
 #define MTRR_CHANGE_MASK_DEFTYPE   0x04
 
-/* In the Intel processor's MTRR interface, the MTRR type is always held in
-   an 8 bit field: */
-typedef u8 mtrr_type;
-
 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 
 struct mtrr_ops {
@@ -71,13 +61,6 @@ struct set_mtrr_context {
 	u32 ccr3;
 };
 
-struct mtrr_var_range {
-	u32 base_lo;
-	u32 base_hi;
-	u32 mask_lo;
-	u32 mask_hi;
-};
-
 void set_mtrr_done(struct set_mtrr_context *ctxt);
 void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
 void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index a69a01a..2c8657b 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -57,6 +57,31 @@ struct mtrr_gentry {
 };
 #endif /* !__i386__ */
 
+struct mtrr_var_range {
+	u32 base_lo;
+	u32 base_hi;
+	u32 mask_lo;
+	u32 mask_hi;
+};
+
+/* In the Intel processor's MTRR interface, the MTRR type is always held in
+   an 8 bit field: */
+typedef u8 mtrr_type;
+
+#define MTRR_NUM_FIXED_RANGES 88
+#define MTRR_MAX_VAR_RANGES 256
+
+struct mtrr_state_type {
+	struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
+	mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
+	unsigned char enabled;
+	unsigned char have_fixed;
+	mtrr_type def_type;
+};
+
+#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
+#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+
 /*  These are the various ioctls  */
 #define MTRRIOC_ADD_ENTRY        _IOW(MTRR_IOCTL_BASE,  0, struct mtrr_sentry)
 #define MTRRIOC_SET_ENTRY        _IOW(MTRR_IOCTL_BASE,  1, struct mtrr_sentry)
-- 
1.5.4.5



* [PATCH 5/7] KVM: Improve MTRR structure
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
                   ` (3 preceding siblings ...)
  2008-10-08  4:17 ` [PATCH 4/7] Export some definition of MTRR Sheng Yang
@ 2008-10-08  4:17 ` Sheng Yang
  2008-10-08  4:17 ` [PATCH 6/7] Add local get_mtrr_type() to support MTRR Sheng Yang
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Sheng Yang

Also reset the MMU context when an MTRR is set, so that the new memory
type takes effect.
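
For reference, the variable-MTRR decode in the hunk below maps an MSR
number back to a range index plus a base/mask selector. A worked example
with a hypothetical MSR number:

	/* MSR 0x205 is MTRRphysMask_MSR(2) = 0x200 + 2*2 + 1 */
	idx = (0x205 - 0x200) / 2;              /* = 2 */
	is_mtrr_mask = 0x205 - 0x200 - 2 * idx; /* = 1, the mask half */

so such a write lands in vcpu->arch.mtrr_state.var_ranges[2].mask_lo.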

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/x86.c         |   61 ++++++++++++++++++++++++++++++++++++++++++-
 include/asm-x86/kvm_host.h |    5 +++-
 2 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4cfdd1b..170e9bf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -40,6 +40,7 @@
 #include <asm/uaccess.h>
 #include <asm/msr.h>
 #include <asm/desc.h>
+#include <asm/mtrr.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS						\
@@ -881,10 +882,38 @@ static bool msr_mtrr_valid(unsigned msr)
 
 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	vcpu->arch.mtrr[msr - 0x200] = data;
+	if (msr == MSR_MTRRdefType) {
+		vcpu->arch.mtrr_state.def_type = data;
+		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
+	} else if (msr == MSR_MTRRfix64K_00000)
+		p[0] = data;
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		p[1 + msr - MSR_MTRRfix16K_80000] = data;
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
+	else if (msr == MSR_IA32_CR_PAT)
+		vcpu->arch.pat = data;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pt = data;
+	}
+
+	kvm_mmu_reset_context(vcpu);
 	return 0;
 }
 
@@ -980,10 +1009,37 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 
 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	*pdata = vcpu->arch.mtrr[msr - 0x200];
+	if (msr == MSR_MTRRdefType)
+		*pdata = vcpu->arch.mtrr_state.def_type +
+			 (vcpu->arch.mtrr_state.enabled << 10);
+	else if (msr == MSR_MTRRfix64K_00000)
+		*pdata = p[0];
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
+	else if (msr == MSR_IA32_CR_PAT)
+		*pdata = vcpu->arch.pat;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pdata = *pt;
+	}
+
 	return 0;
 }
 
@@ -4154,6 +4210,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	/* We do fxsave: this must be aligned. */
 	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
 
+	vcpu->arch.mtrr_state.have_fixed = 1;
 	vcpu_load(vcpu);
 	r = kvm_arch_vcpu_reset(vcpu);
 	if (r == 0)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 1b114ca..f863fd9 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -21,6 +21,7 @@
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
+#include <asm/mtrr.h>
 
 #define KVM_MAX_VCPUS 16
 #define KVM_MEMORY_SLOTS 32
@@ -86,6 +87,7 @@
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
 extern spinlock_t kvm_lock;
@@ -328,7 +330,8 @@ struct kvm_vcpu_arch {
 	bool nmi_pending;
 	bool nmi_injected;
 
-	u64 mtrr[0x100];
+	struct mtrr_state_type mtrr_state;
+	u32 pat;
 };
 
 struct kvm_mem_alias {
-- 
1.5.4.5



* [PATCH 6/7] Add local get_mtrr_type() to support MTRR
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
                   ` (4 preceding siblings ...)
  2008-10-08  4:17 ` [PATCH 5/7] KVM: Improve MTRR structure Sheng Yang
@ 2008-10-08  4:17 ` Sheng Yang
  2008-10-08  4:17 ` [PATCH 7/7] Enable MTRR for EPT Sheng Yang
  2008-10-08 10:23 ` [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Avi Kivity
  7 siblings, 0 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Sheng Yang

For EPT memory type support.
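
As a worked example of the fixed-range indexing in get_mtrr_type() below
(hypothetical guest physical address, for illustration only):

	/*
	 * start = 0xA5000 lies in the 16K fixed-range area 0x80000-0xBFFFF,
	 * so idx = 1 * 8 + ((0xA5000 - 0x80000) >> 14) = 8 + 9 = 17,
	 * and mtrr_state->fixed_ranges[17] holds the effective type.
	 */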

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/mmu.c |  104 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 104 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9ff8d90..1124723 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1393,6 +1393,110 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	return page;
 }
 
+/*
+ * The function is based on mtrr_type_lookup() in
+ * arch/x86/kernel/cpu/mtrr/generic.c
+ */
+static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
+			 u64 start, u64 end)
+{
+	int i;
+	u64 base, mask;
+	u8 prev_match, curr_match;
+	int num_var_ranges = KVM_NR_VAR_MTRR;
+
+	if (!mtrr_state->enabled)
+		return 0xFF;
+
+	/* Make end inclusive, instead of exclusive */
+	end--;
+
+	/* Look in fixed ranges. Just return the type as per start */
+	if (mtrr_state->have_fixed && (start < 0x100000)) {
+		int idx;
+
+		if (start < 0x80000) {
+			idx = 0;
+			idx += (start >> 16);
+			return mtrr_state->fixed_ranges[idx];
+		} else if (start < 0xC0000) {
+			idx = 1 * 8;
+			idx += ((start - 0x80000) >> 14);
+			return mtrr_state->fixed_ranges[idx];
+		} else if (start < 0x1000000) {
+			idx = 3 * 8;
+			idx += ((start - 0xC0000) >> 12);
+			return mtrr_state->fixed_ranges[idx];
+		}
+	}
+
+	/*
+	 * Look in variable ranges
+	 * Look for multiple ranges matching this address and pick the type
+	 * as per MTRR precedence
+	 */
+	if (!(mtrr_state->enabled & 2))
+		return mtrr_state->def_type;
+
+	prev_match = 0xFF;
+	for (i = 0; i < num_var_ranges; ++i) {
+		unsigned short start_state, end_state;
+
+		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
+			continue;
+
+		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
+		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
+		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
+		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
+
+		start_state = ((start & mask) == (base & mask));
+		end_state = ((end & mask) == (base & mask));
+		if (start_state != end_state)
+			return 0xFE;
+
+		if ((start & mask) != (base & mask))
+			continue;
+
+		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
+		if (prev_match == 0xFF) {
+			prev_match = curr_match;
+			continue;
+		}
+
+		if (prev_match == MTRR_TYPE_UNCACHABLE ||
+		    curr_match == MTRR_TYPE_UNCACHABLE)
+			return MTRR_TYPE_UNCACHABLE;
+
+		if ((prev_match == MTRR_TYPE_WRBACK &&
+		     curr_match == MTRR_TYPE_WRTHROUGH) ||
+		    (prev_match == MTRR_TYPE_WRTHROUGH &&
+		     curr_match == MTRR_TYPE_WRBACK)) {
+			prev_match = MTRR_TYPE_WRTHROUGH;
+			curr_match = MTRR_TYPE_WRTHROUGH;
+		}
+
+		if (prev_match != curr_match)
+			return MTRR_TYPE_UNCACHABLE;
+	}
+
+	if (prev_match != 0xFF)
+		return prev_match;
+
+	return mtrr_state->def_type;
+}
+
+static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u8 mtrr;
+
+	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
+			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
+	if (mtrr == 0xfe || mtrr == 0xff)
+		mtrr = MTRR_TYPE_WRBACK;
+	return mtrr;
+}
+
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	unsigned index;
-- 
1.5.4.5



* [PATCH 7/7] Enable MTRR for EPT
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
                   ` (5 preceding siblings ...)
  2008-10-08  4:17 ` [PATCH 6/7] Add local get_mtrr_type() to support MTRR Sheng Yang
@ 2008-10-08  4:17 ` Sheng Yang
  2008-10-08 10:23 ` [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Avi Kivity
  7 siblings, 0 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08  4:17 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Sheng Yang


Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/mmu.c         |   11 ++++++++++-
 arch/x86/kvm/svm.c         |    6 ++++++
 arch/x86/kvm/vmx.c         |   12 +++++++++---
 arch/x86/kvm/x86.c         |    2 +-
 include/asm-x86/kvm_host.h |    3 ++-
 5 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1124723..88d0f5e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mt_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -183,13 +184,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
+	shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1546,6 +1548,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	u64 spte;
 	int ret = 0;
+	u64 mt_mask = shadow_mt_mask;
+
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect
@@ -1564,6 +1568,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
+	if (mt_mask) {
+		mt_mask = get_memory_type(vcpu, gfn) <<
+			  kvm_x86_ops->get_mt_mask_shift();
+		spte |= mt_mask;
+	}
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9c4ce65..05efc4e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1912,6 +1912,11 @@ static int get_npt_level(void)
 #endif
 }
 
+static int svm_get_mt_mask_shift(void)
+{
+	return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -1967,6 +1972,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
+	.get_mt_mask_shift = svm_get_mt_mask_shift,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d7207e7..09eb9ee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3494,6 +3494,11 @@ static int get_ept_level(void)
 	return VMX_EPT_DEFAULT_GAW + 1;
 }
 
+static int vmx_get_mt_mask_shift(void)
+{
+	return VMX_EPT_MT_EPTE_SHIFT;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -3549,6 +3554,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
+	.get_mt_mask_shift = vmx_get_mt_mask_shift,
 };
 
 static int __init vmx_init(void)
@@ -3588,10 +3594,10 @@ static int __init vmx_init(void)
 	if (vm_need_ept()) {
 		bypass_guest_pf = 0;
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+			VMX_EPT_WRITABLE_MASK);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-				VMX_EPT_EXECUTABLE_MASK);
+				VMX_EPT_EXECUTABLE_MASK,
+				VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
 		kvm_enable_tdp();
 	} else
 		kvm_disable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 170e9bf..e2e140c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2851,7 +2851,7 @@ int kvm_arch_init(void *opaque)
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
 	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-			PT_DIRTY_MASK, PT64_NX_MASK, 0);
+			PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
 	return 0;
 
 out:
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index f863fd9..eba0054 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -477,6 +477,7 @@ struct kvm_x86_ops {
 
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
+	int (*get_mt_mask_shift)(void);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -490,7 +491,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask);
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-- 
1.5.4.5



* Re: [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu
  2008-10-08  4:17 ` [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu Sheng Yang
@ 2008-10-08 10:14   ` Avi Kivity
  2008-10-08 10:41     ` Sheng Yang
  0 siblings, 1 reply; 13+ messages in thread
From: Avi Kivity @ 2008-10-08 10:14 UTC (permalink / raw)
  To: Sheng Yang; +Cc: kvm

Sheng Yang wrote:
> Use a separate msr_bitmap for each vcpu, in preparation for guest PAT support.
>   

Why is this necessary?  True, it reduces the overhead of the guest 
reading and writing the PAT MSRs, but is such access frequent?

I would think guests set the PAT once, and never change it later.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



* Re: [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2)
  2008-10-08  4:17 [PATCH 0/7] Enable MTRR/PAT support for KVM EPT(v2) Sheng Yang
                   ` (6 preceding siblings ...)
  2008-10-08  4:17 ` [PATCH 7/7] Enable MTRR for EPT Sheng Yang
@ 2008-10-08 10:23 ` Avi Kivity
  7 siblings, 0 replies; 13+ messages in thread
From: Avi Kivity @ 2008-10-08 10:23 UTC (permalink / raw)
  To: Sheng Yang; +Cc: kvm

Sheng Yang wrote:
> Hi, Avi
>
> Here is the updated patchset enabling MTRR/PAT support for KVM; the last
> patch enables it for EPT. Shadow paging support will follow later; I am
> still doing some cleanup work.
>
> The changes from v1 include bug fixes and a rename of the GUEST_PAT fields
> according to the newly published spec. For MTRR, I reuse the host mtrr
> struct rather than duplicating that part of the work. This is mostly in
> preparation for shadow MTRR/PAT support.
>
> I think last time you mentioned you had a draft patch for extra MSR
> save/restore on hand? If you are busy with other things, I can take it over.
>   

Looks good; we just need to clear up the per-vcpu msr bitmap issue.

Regarding msr save/restore, please look at msrs_to_save[] in x86.c.
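
For context, msrs_to_save[] is the list of MSRs exposed to userspace for
save/restore. A sketch of the suggested change -- the surrounding entries
reflect the tree of that era and may differ:

	static u32 msrs_to_save[] = {
		MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
		MSR_K6_STAR,
	#ifdef CONFIG_X86_64
		MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
	#endif
		MSR_IA32_TIME_STAMP_COUNTER,
		MSR_IA32_CR_PAT,	/* new: lets userspace migrate the guest PAT */
	};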

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



* Re: [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu
  2008-10-08 10:14   ` Avi Kivity
@ 2008-10-08 10:41     ` Sheng Yang
  2008-10-08 11:10       ` Avi Kivity
  0 siblings, 1 reply; 13+ messages in thread
From: Sheng Yang @ 2008-10-08 10:41 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm

On Wednesday 08 October 2008 18:14:22 Avi Kivity wrote:
> Sheng Yang wrote:
> > Use a separate msr_bitmap for each vcpu, in preparation for guest PAT support.
>
> Why is this necessary?  True, it reduces the overhead of the guest
> reading and writing the PAT MSRs, but is such access frequent?
>
> I would think guests set the PAT once, and never change it later.

Yeah. In fact, I just think a per-vcpu msr_bitmap would be needed sooner or
later, so I got it done here, and it fits naturally with GUEST_PAT. Xen also
uses it for another purpose (DEBUGCTLMSR related; I haven't checked it
through), and svm.c in KVM already uses a per-vcpu msr bitmap.

And without that, a callback would have to be implemented to hook the MSR
write and update the guest PAT for both vmx and svm, or we would have to
update GUEST_PAT on every vmentry according to vcpu->pat. Either way seems
less natural than relying on GUEST_PAT support.
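
(Roughly, the callback route means teaching vmx_set_msr() a PAT case along
these lines -- just a sketch of the idea, not a tested patch:

	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;

with the mirror image in vmx_get_msr().)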

However, if you think a per-vcpu msr_bitmap is a waste, I'd like to add the
callbacks.

--
regards
Yang, Sheng


* Re: [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu
  2008-10-08 10:41     ` Sheng Yang
@ 2008-10-08 11:10       ` Avi Kivity
  2008-10-08 11:21         ` Sheng Yang
  0 siblings, 1 reply; 13+ messages in thread
From: Avi Kivity @ 2008-10-08 11:10 UTC (permalink / raw)
  To: Sheng Yang; +Cc: kvm

Sheng Yang wrote:
> On Wednesday 08 October 2008 18:14:22 Avi Kivity wrote:
>   
>> Sheng Yang wrote:
>>     
>>> Use a separate msr_bitmap for each vcpu, in preparation for guest PAT support.
>>>       
>> Why is this necessary?  True, it reduces the overhead of the guest
>> reading and writing the PAT MSRs, but is such access frequent?
>>
>> I would think guests set the PAT once, and never change it later.
>>     
>
> Yeah. In fact, I just think a per-vcpu msr_bitmap would be needed sooner or
> later, so I got it done here, and it fits naturally with GUEST_PAT. Xen also
> uses it for another purpose (DEBUGCTLMSR related; I haven't checked it
> through), and svm.c in KVM already uses a per-vcpu msr bitmap.
>   

svm.c uses it for last branch record; I think these are also rarely 
accessed from the guest.

> And without that, a callback would have to be implemented to hook the MSR
> write and update the guest PAT for both vmx and svm, or we would have to
> update GUEST_PAT on every vmentry according to vcpu->pat. Either way seems
> less natural than relying on GUEST_PAT support.
>   

We need the callbacks (vmx_set_msr and vmx_get_msr, or did you mean 
something else?) anyway for save/restore support.

> However, if you think a per-vcpu msr_bitmap is a waste, I'd like to add the
> callbacks.
>   

I agree that we will likely need per-vcpu msr bitmap support one day; but
let's start without it, since that way we exercise the PAT MSR callbacks.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



* Re: [PATCH 1/7] KVM: VMX: Allocate MSR Bitmap for each vcpu
  2008-10-08 11:10       ` Avi Kivity
@ 2008-10-08 11:21         ` Sheng Yang
  0 siblings, 0 replies; 13+ messages in thread
From: Sheng Yang @ 2008-10-08 11:21 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm

On Wednesday 08 October 2008 19:10:46 Avi Kivity wrote:
> Sheng Yang wrote:
> > On Wednesday 08 October 2008 18:14:22 Avi Kivity wrote:
> >> Sheng Yang wrote:
> >>> Use a separate msr_bitmap for each vcpu, in preparation for guest PAT support.
> >>
> >> Why is this necessary?  True, it reduces the overhead of the guest
> >> reading and writing the PAT MSRs, but is such access frequent?
> >>
> >> I would think guests set the PAT once, and never change it later.
> >
> > Yeah. In fact, I just think a per-vcpu msr_bitmap would be needed sooner
> > or later, so I got it done here, and it fits naturally with GUEST_PAT.
> > Xen also uses it for another purpose (DEBUGCTLMSR related; I haven't
> > checked it through), and svm.c in KVM already uses a per-vcpu msr bitmap.
>
> svm.c uses it for last branch record; I think these are also rarely
> accessed from the guest.
>
> > And without that, a callback would have to be implemented to hook the
> > MSR write and update the guest PAT for both vmx and svm, or we would
> > have to update GUEST_PAT on every vmentry according to vcpu->pat.
> > Either way seems less natural than relying on GUEST_PAT support.
>
> We need the callbacks (vmx_set_msr and vmx_get_msr, or did you mean
> something else?) anyway for save/restore support.

Nothing else (I was thinking of something wrong...)
>
> > However, if you think a per-vcpu msr_bitmap is a waste, I'd like to add
> > the callbacks.
>
> I agree that we will likely need msr bitmap support one day; but let's
> start without it as this way we test the pat msr callbacks.

OK, I will update the patch with callbacks.

Thanks!
--
regards
Yang, Sheng

