kvm.vger.kernel.org archive mirror
* [PATCH 1/2] Add code to track call origin for msr assignment.
@ 2012-11-15 20:25 Will Auld
  2012-11-16 21:58 ` Marcelo Tosatti
  2012-11-19  9:02 ` Gleb Natapov
  0 siblings, 2 replies; 5+ messages in thread
From: Will Auld @ 2012-11-15 20:25 UTC (permalink / raw)
  To: gleb, mtosatti, xiantao.zhang@intel.com, jinsong.liu@intel.com,
	will.auld@intel.com, avi@redhat.com, kvm@vger.kernel.org,
	donald.d.dugger@intel.com

In order to track who initiated the call (host or guest) to modify an msr
value I have changed function call parameters along the call path. The
specific change is to add a struct pointer parameter that points to (index,
data, caller) information rather than having this information passed as
individual parameters.

The initial use for this capability is for updating the IA32_TSC_ADJUST
msr while setting the tsc value. It is anticipated that this capability
will be useful for other tasks.
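
For example, kvm_write_tsc() can then tell a guest WRMSR apart from a
host-side restore along these lines (illustrative sketch; the helper names
stand in for what the follow-on patch adds and are not part of this patch):

	/* Only a guest-initiated TSC write should move IA32_TSC_ADJUST;
	 * host-initiated writes (e.g. restoring state) must leave it alone.
	 */
	if (!msr->host_initiated && guest_cpuid_has_tsc_adjust(vcpu))
		update_ia32_tsc_adjust_msr(vcpu, offset);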

Signed-off-by: Will Auld <will.auld@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 12 +++++++++---
 arch/x86/kvm/svm.c              | 21 +++++++++++++++------
 arch/x86/kvm/vmx.c              | 24 +++++++++++++++++-------
 arch/x86/kvm/x86.c              | 23 +++++++++++++++++------
 arch/x86/kvm/x86.h              |  2 +-
 5 files changed, 59 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d6..da34027 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -598,6 +598,12 @@ struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
+struct msr_data {
+    bool host_initiated;
+    u32 index;
+    u64 data;
+};
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -621,7 +627,7 @@ struct kvm_x86_ops {
 	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
 				struct kvm_guest_debug *dbg);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
@@ -772,7 +778,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
 
@@ -799,7 +805,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index baead95..5ac11f0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1211,6 +1211,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	struct page *msrpm_pages;
 	struct page *hsave_page;
 	struct page *nested_msrpm_pages;
+	struct msr_data msr;
 	int err;
 
 	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -1255,7 +1256,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
 	init_vmcb(svm);
-	kvm_write_tsc(&svm->vcpu, 0);
+	msr.data = 0x0;
+	msr.index = MSR_IA32_TSC;
+	msr.host_initiated = true;
+	kvm_write_tsc(&svm->vcpu, &msr);
 
 	err = fx_init(&svm->vcpu);
 	if (err)
@@ -3147,13 +3151,15 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
 	return 0;
 }
 
-static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
+static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	u32 ecx = msr->index;
+	u64 data = msr->data;
 	switch (ecx) {
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, msr);
 		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
@@ -3208,20 +3214,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
 	default:
-		return kvm_set_msr_common(vcpu, ecx, data);
+		return kvm_set_msr_common(vcpu, msr);
 	}
 	return 0;
 }
 
 static int wrmsr_interception(struct vcpu_svm *svm)
 {
+	struct msr_data msr;
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-
+	msr.data = data;
+	msr.index = ecx;
+	msr.host_initiated = false;
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, ecx, data)) {
+	if (svm_set_msr(&svm->vcpu, &msr)) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03d..819970f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2197,15 +2197,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
 	int ret = 0;
+	u32 msr_index = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr_index) {
 	case MSR_EFER:
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
@@ -2231,7 +2233,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, msr_info);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2239,7 +2241,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			vcpu->arch.pat = data;
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
 	case MSR_TSC_AUX:
 		if (!vmx->rdtscp_enabled)
@@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			}
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 	}
 
 	return ret;
@@ -3835,6 +3837,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	unsigned long a;
 #endif
 	int i;
+	struct msr_data msr;
 
 	/* I/O */
 	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
@@ -3918,7 +3921,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	set_cr4_guest_host_mask(vmx);
 
-	kvm_write_tsc(&vmx->vcpu, 0);
+	msr.data = 0x0;
+	msr.index = MSR_IA32_TSC;
+	msr.host_initiated = true;
+	kvm_write_tsc(&vmx->vcpu, &msr);
 
 	return 0;
 }
@@ -4649,11 +4655,15 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu)
 {
+	struct msr_data msr;
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	if (vmx_set_msr(vcpu, ecx, data) != 0) {
+	msr.data = data;
+	msr.index = ecx;
+	msr.host_initiated = false;
+	if (vmx_set_msr(vcpu, &msr) != 0) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48..a3aac1c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -883,9 +883,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
-	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+	return kvm_x86_ops->set_msr(vcpu, msr);
 }
 
 /*
@@ -893,7 +893,11 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  */
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	return kvm_set_msr(vcpu, index, *data);
+	struct msr_data msr;
+	msr.data = *data;
+	msr.index = index;
+	msr.host_initiated = true;
+	return kvm_set_msr(vcpu, &msr);
 }
 
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
@@ -1043,12 +1047,13 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 	return tsc;
 }
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
 	s64 usdiff;
+	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
@@ -1561,9 +1566,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	bool pr = false;
+	u32 msr = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr) {
 	case MSR_EFER:
@@ -4286,7 +4293,11 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 data)
 {
-	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+	struct msr_data msr;
+	msr.data = data;
+	msr.index = msr_index;
+	msr.host_initiated = true;
+	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 3d1134d..1dcb8e3 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 	gva_t addr, void *val, unsigned int bytes,
-- 
1.8.0.rc0






* Re: [PATCH 1/2] Add code to track call origin for msr assignment.
  2012-11-15 20:25 [PATCH 1/2] Add code to track call origin for msr assignment Will Auld
@ 2012-11-16 21:58 ` Marcelo Tosatti
  2012-11-19  9:02 ` Gleb Natapov
  1 sibling, 0 replies; 5+ messages in thread
From: Marcelo Tosatti @ 2012-11-16 21:58 UTC (permalink / raw)
  To: will.auld
  Cc: gleb, xiantao.zhang@intel.com, jinsong.liu@intel.com,
	avi@redhat.com, kvm@vger.kernel.org, donald.d.dugger@intel.com

On Thu, Nov 15, 2012 at 12:25:45PM -0800, Will Auld wrote:
> In order to track who initiated the call (host or guest) to modify an msr
> value I have changed function call parameters along the call path. The
> specific change is to add a struct pointer parameter that points to (index,
> data, caller) information rather than having this information passed as
> individual parameters.
> 
> The initial use for this capability is for updating the IA32_TSC_ADJUST
> msr while setting the tsc value. It is anticipated that this capability
> will be useful for other tasks.
> 
> Signed-off-by: Will Auld <will.auld@intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 12 +++++++++---
>  arch/x86/kvm/svm.c              | 21 +++++++++++++++------
>  arch/x86/kvm/vmx.c              | 24 +++++++++++++++++-------
>  arch/x86/kvm/x86.c              | 23 +++++++++++++++++------
>  arch/x86/kvm/x86.h              |  2 +-
>  5 files changed, 59 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 42bce48..a3aac1c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -883,9 +883,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
>   * Returns 0 on success, non-0 otherwise.
>   * Assumes vcpu_load() was already called.
>   */
> -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> +int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  {
> -	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
> +	return kvm_x86_ops->set_msr(vcpu, msr);
>  }
>  
>  /*

> @@ -4286,7 +4293,11 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
>  static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
>  			    u32 msr_index, u64 data)
>  {
> -	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
> +	struct msr_data msr;
> +	msr.data = data;
> +	msr.index = msr_index;
> +	msr.host_initiated = true;

false, this is guest instruction emulation.
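
Something along these lines instead (untested sketch):

	static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
				    u32 msr_index, u64 data)
	{
		struct msr_data msr;

		msr.data = data;
		msr.index = msr_index;
		/* an emulated WRMSR comes from the guest, not from an ioctl */
		msr.host_initiated = false;
		return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
	}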



* Re: [PATCH 1/2] Add code to track call origin for msr assignment.
  2012-11-15 20:25 [PATCH 1/2] Add code to track call origin for msr assignment Will Auld
  2012-11-16 21:58 ` Marcelo Tosatti
@ 2012-11-19  9:02 ` Gleb Natapov
  1 sibling, 0 replies; 5+ messages in thread
From: Gleb Natapov @ 2012-11-19  9:02 UTC (permalink / raw)
  To: will.auld
  Cc: mtosatti, xiantao.zhang@intel.com, jinsong.liu@intel.com,
	avi@redhat.com, kvm@vger.kernel.org, donald.d.dugger@intel.com

On Thu, Nov 15, 2012 at 12:25:45PM -0800, Will Auld wrote:
> In order to track who initiated the call (host or guest) to modify an msr
> value I have changed function call parameters along the call path. The
> specific change is to add a struct pointer parameter that points to (index,
> data, caller) information rather than having this information passed as
> individual parameters.
> 
> The initial use for this capability is for updating the IA32_TSC_ADJUST
> msr while setting the tsc value. It is anticipated that this capability
> will be useful for other tasks.
> 
> Signed-off-by: Will Auld <will.auld@intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 12 +++++++++---
>  arch/x86/kvm/svm.c              | 21 +++++++++++++++------
>  arch/x86/kvm/vmx.c              | 24 +++++++++++++++++-------
>  arch/x86/kvm/x86.c              | 23 +++++++++++++++++------
>  arch/x86/kvm/x86.h              |  2 +-
>  5 files changed, 59 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 09155d6..da34027 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -598,6 +598,12 @@ struct kvm_vcpu_stat {
>  
>  struct x86_instruction_info;
>  
> +struct msr_data {
> +    bool host_initiated;
> +    u32 index;
> +    u64 data;
> +};
> +
>  struct kvm_x86_ops {
>  	int (*cpu_has_kvm_support)(void);          /* __init */
>  	int (*disabled_by_bios)(void);             /* __init */
> @@ -621,7 +627,7 @@ struct kvm_x86_ops {
>  	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
>  				struct kvm_guest_debug *dbg);
>  	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
> -	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
> +	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
You've changed the function pointer signature here, but emulator_set_msr()
remained the same.

>  	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
>  	void (*get_segment)(struct kvm_vcpu *vcpu,
>  			    struct kvm_segment *var, int seg);
> @@ -772,7 +778,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
>  
>  void kvm_enable_efer_bits(u64);
>  int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
> -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
> +int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
>  struct x86_emulate_ctxt;
>  
> @@ -799,7 +805,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
>  int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
>  
>  int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
> -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
> +int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
>  unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
>  void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index baead95..5ac11f0 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1211,6 +1211,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
>  	struct page *msrpm_pages;
>  	struct page *hsave_page;
>  	struct page *nested_msrpm_pages;
> +	struct msr_data msr;
>  	int err;
>  
>  	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
> @@ -1255,7 +1256,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
>  	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
>  	svm->asid_generation = 0;
>  	init_vmcb(svm);
> -	kvm_write_tsc(&svm->vcpu, 0);
> +	msr.data = 0x0;
> +	msr.index = MSR_IA32_TSC;
> +	msr.host_initiated = true;
> +	kvm_write_tsc(&svm->vcpu, &msr);
>  
>  	err = fx_init(&svm->vcpu);
>  	if (err)
> @@ -3147,13 +3151,15 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
>  	return 0;
>  }
>  
> -static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
> +static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  
> +	u32 ecx = msr->index;
> +	u64 data = msr->data;
>  	switch (ecx) {
>  	case MSR_IA32_TSC:
> -		kvm_write_tsc(vcpu, data);
> +		kvm_write_tsc(vcpu, msr);
>  		break;
>  	case MSR_STAR:
>  		svm->vmcb->save.star = data;
> @@ -3208,20 +3214,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
>  		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>  		break;
>  	default:
> -		return kvm_set_msr_common(vcpu, ecx, data);
> +		return kvm_set_msr_common(vcpu, msr);
>  	}
>  	return 0;
>  }
>  
>  static int wrmsr_interception(struct vcpu_svm *svm)
>  {
> +	struct msr_data msr;
>  	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
>  	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
>  		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
>  
> -
> +	msr.data = data;
> +	msr.index = ecx;
> +	msr.host_initiated = false;
>  	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
> -	if (svm_set_msr(&svm->vcpu, ecx, data)) {
> +	if (svm_set_msr(&svm->vcpu, &msr)) {
>  		trace_kvm_msr_write_ex(ecx, data);
>  		kvm_inject_gp(&svm->vcpu, 0);
>  	} else {
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c00f03d..819970f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2197,15 +2197,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
>   * Returns 0 on success, non-0 otherwise.
>   * Assumes vcpu_load() was already called.
>   */
> -static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> +static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	struct shared_msr_entry *msr;
>  	int ret = 0;
> +	u32 msr_index = msr_info->index;
> +	u64 data = msr_info->data;
>  
>  	switch (msr_index) {
>  	case MSR_EFER:
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_info);
>  		break;
>  #ifdef CONFIG_X86_64
>  	case MSR_FS_BASE:
> @@ -2231,7 +2233,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  		vmcs_writel(GUEST_SYSENTER_ESP, data);
>  		break;
>  	case MSR_IA32_TSC:
> -		kvm_write_tsc(vcpu, data);
> +		kvm_write_tsc(vcpu, msr_info);
>  		break;
>  	case MSR_IA32_CR_PAT:
>  		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
> @@ -2239,7 +2241,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  			vcpu->arch.pat = data;
>  			break;
>  		}
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_info);
>  		break;
>  	case MSR_TSC_AUX:
>  		if (!vmx->rdtscp_enabled)
> @@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  			}
>  			break;
>  		}
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_info);
>  	}
>  
>  	return ret;
> @@ -3835,6 +3837,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  	unsigned long a;
>  #endif
>  	int i;
> +	struct msr_data msr;
>  
>  	/* I/O */
>  	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
> @@ -3918,7 +3921,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
>  	set_cr4_guest_host_mask(vmx);
>  
> -	kvm_write_tsc(&vmx->vcpu, 0);
> +	msr.data = 0x0;
> +	msr.index = MSR_IA32_TSC;
> +	msr.host_initiated = true;
> +	kvm_write_tsc(&vmx->vcpu, &msr);
>  
>  	return 0;
>  }
> @@ -4649,11 +4655,15 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
>  
>  static int handle_wrmsr(struct kvm_vcpu *vcpu)
>  {
> +	struct msr_data msr;
>  	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
>  	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
>  		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
>  
> -	if (vmx_set_msr(vcpu, ecx, data) != 0) {
> +	msr.data = data;
> +	msr.index = ecx;
> +	msr.host_initiated = false;
> +	if (vmx_set_msr(vcpu, &msr) != 0) {
>  		trace_kvm_msr_write_ex(ecx, data);
>  		kvm_inject_gp(vcpu, 0);
>  		return 1;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 42bce48..a3aac1c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -883,9 +883,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
>   * Returns 0 on success, non-0 otherwise.
>   * Assumes vcpu_load() was already called.
>   */
> -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> +int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  {
> -	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
> +	return kvm_x86_ops->set_msr(vcpu, msr);
>  }
>  
>  /*
> @@ -893,7 +893,11 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>   */
>  static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
>  {
> -	return kvm_set_msr(vcpu, index, *data);
> +	struct msr_data msr;
> +	msr.data = *data;
> +	msr.index = index;
> +	msr.host_initiated = true;
> +	return kvm_set_msr(vcpu, &msr);
>  }
>  
>  static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
> @@ -1043,12 +1047,13 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
>  	return tsc;
>  }
>  
> -void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> +void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  {
>  	struct kvm *kvm = vcpu->kvm;
>  	u64 offset, ns, elapsed;
>  	unsigned long flags;
>  	s64 usdiff;
> +	u64 data = msr->data;
>  
>  	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
>  	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
> @@ -1561,9 +1566,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  }
>  
> -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> +int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
>  	bool pr = false;
> +	u32 msr = msr_info->index;
> +	u64 data = msr_info->data;
>  
>  	switch (msr) {
>  	case MSR_EFER:
> @@ -4286,7 +4293,11 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
>  static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
>  			    u32 msr_index, u64 data)
>  {
> -	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
> +	struct msr_data msr;
> +	msr.data = data;
> +	msr.index = msr_index;
> +	msr.host_initiated = true;
> +	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
>  }
>  
>  static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 3d1134d..1dcb8e3 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
>  void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
>  int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
>  
> -void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
> +void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
>  int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
>  	gva_t addr, void *val, unsigned int bytes,
> -- 
> 1.8.0.rc0
> 
> 
> 

--
			Gleb.


* [PATCH 1/2] Add code to track call origin for msr assignment.
@ 2012-11-22  2:51 Will Auld
  2012-11-25 13:40 ` Gleb Natapov
  0 siblings, 1 reply; 5+ messages in thread
From: Will Auld @ 2012-11-22  2:51 UTC (permalink / raw)
  To: Gleb, mtosatti@redhat.com, kvm@vger.kernel.org,
	donald.d.dugger@intel.com, jinsong.liu@intel.com,
	xiantao.zhang@intel.com, will.auld@intel.com, avi@redhat.com

In order to track who initiated the call (host or guest) to modify an msr
value I have changed function call parameters along the call path. The
specific change is to add a struct pointer parameter that points to (index,
data, caller) information rather than having this information passed as
individual parameters.

The initial use for this capability is for updating the IA32_TSC_ADJUST
msr while setting the tsc value. It is anticipated that this capability
will be useful for other tasks.

Signed-off-by: Will Auld <will.auld@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 12 +++++++++---
 arch/x86/kvm/svm.c              | 21 +++++++++++++++------
 arch/x86/kvm/vmx.c              | 24 +++++++++++++++++-------
 arch/x86/kvm/x86.c              | 23 +++++++++++++++++------
 arch/x86/kvm/x86.h              |  2 +-
 5 files changed, 59 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d6..da34027 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -598,6 +598,12 @@ struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
+struct msr_data {
+    bool host_initiated;
+    u32 index;
+    u64 data;
+};
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -621,7 +627,7 @@ struct kvm_x86_ops {
 	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
 				struct kvm_guest_debug *dbg);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
@@ -772,7 +778,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
 
@@ -799,7 +805,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index baead95..5ac11f0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1211,6 +1211,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	struct page *msrpm_pages;
 	struct page *hsave_page;
 	struct page *nested_msrpm_pages;
+	struct msr_data msr;
 	int err;
 
 	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -1255,7 +1256,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
 	init_vmcb(svm);
-	kvm_write_tsc(&svm->vcpu, 0);
+	msr.data = 0x0;
+	msr.index = MSR_IA32_TSC;
+	msr.host_initiated = true;
+	kvm_write_tsc(&svm->vcpu, &msr);
 
 	err = fx_init(&svm->vcpu);
 	if (err)
@@ -3147,13 +3151,15 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
 	return 0;
 }
 
-static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
+static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	u32 ecx = msr->index;
+	u64 data = msr->data;
 	switch (ecx) {
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, msr);
 		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
@@ -3208,20 +3214,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
 	default:
-		return kvm_set_msr_common(vcpu, ecx, data);
+		return kvm_set_msr_common(vcpu, msr);
 	}
 	return 0;
 }
 
 static int wrmsr_interception(struct vcpu_svm *svm)
 {
+	struct msr_data msr;
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-
+	msr.data = data;
+	msr.index = ecx;
+	msr.host_initiated = false;
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, ecx, data)) {
+	if (svm_set_msr(&svm->vcpu, &msr)) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03d..819970f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2197,15 +2197,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
 	int ret = 0;
+	u32 msr_index = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr_index) {
 	case MSR_EFER:
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
@@ -2231,7 +2233,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, msr_info);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2239,7 +2241,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			vcpu->arch.pat = data;
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
 	case MSR_TSC_AUX:
 		if (!vmx->rdtscp_enabled)
@@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			}
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 	}
 
 	return ret;
@@ -3835,6 +3837,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	unsigned long a;
 #endif
 	int i;
+	struct msr_data msr;
 
 	/* I/O */
 	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
@@ -3918,7 +3921,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	set_cr4_guest_host_mask(vmx);
 
-	kvm_write_tsc(&vmx->vcpu, 0);
+	msr.data = 0x0;
+	msr.index = MSR_IA32_TSC;
+	msr.host_initiated = true;
+	kvm_write_tsc(&vmx->vcpu, &msr);
 
 	return 0;
 }
@@ -4649,11 +4655,15 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu)
 {
+	struct msr_data msr;
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	if (vmx_set_msr(vcpu, ecx, data) != 0) {
+	msr.data = data;
+	msr.index = ecx;
+	msr.host_initiated = false;
+	if (vmx_set_msr(vcpu, &msr) != 0) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48..a3aac1c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -883,9 +883,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
-	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+	return kvm_x86_ops->set_msr(vcpu, msr);
 }
 
 /*
@@ -893,7 +893,11 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  */
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	return kvm_set_msr(vcpu, index, *data);
+	struct msr_data msr;
+	msr.data = *data;
+	msr.index = index;
+	msr.host_initiated = true;
+	return kvm_set_msr(vcpu, &msr);
 }
 
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
@@ -1043,12 +1047,13 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 	return tsc;
 }
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
 	s64 usdiff;
+	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
@@ -1561,9 +1566,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	bool pr = false;
+	u32 msr = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr) {
 	case MSR_EFER:
@@ -4286,7 +4293,11 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 data)
 {
-	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+	struct msr_data msr;
+	msr.data = data;
+	msr.index = msr_index;
+	msr.host_initiated = true;
+	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 3d1134d..1dcb8e3 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 	gva_t addr, void *val, unsigned int bytes,
-- 
1.8.0.rc0






* Re: [PATCH 1/2] Add code to track call origin for msr assignment.
  2012-11-22  2:51 Will Auld
@ 2012-11-25 13:40 ` Gleb Natapov
  0 siblings, 0 replies; 5+ messages in thread
From: Gleb Natapov @ 2012-11-25 13:40 UTC (permalink / raw)
  To: will.auld
  Cc: mtosatti@redhat.com, kvm@vger.kernel.org,
	donald.d.dugger@intel.com, jinsong.liu@intel.com,
	xiantao.zhang@intel.com, avi@redhat.com

On Wed, Nov 21, 2012 at 06:51:20PM -0800, Will Auld wrote:
> In order to track who initiated the call (host or guest) to modify an msr
> value I have changed function call parameters along the call path. The
> specific change is to add a struct pointer parameter that points to (index,
> data, caller) information rather than having this information passed as
> individual parameters.
> 
> The initial use for this capability is for updating the IA32_TSC_ADJUST
> msr while setting the tsc value. It is anticipated that this capability
> will be useful for other tasks.
> 
Previous comments for this patch are not addressed.
http://comments.gmane.org/gmane.comp.emulators.kvm.devel/100994

Also, I would prefer adding a host_initiated parameter to kvm_set_msr()
instead of introducing the msr_data structure.
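
Something like this (sketch of the suggested alternative, not applied code):

	int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data,
			bool host_initiated);

	/* guest WRMSR exit handlers would pass false ... */
	kvm_set_msr(vcpu, ecx, data, false);

	/* ... while the KVM_SET_MSRS ioctl path would pass true. */
	kvm_set_msr(vcpu, index, *data, true);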

> Signed-off-by: Will Auld <will.auld@intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 12 +++++++++---
>  arch/x86/kvm/svm.c              | 21 +++++++++++++++------
>  arch/x86/kvm/vmx.c              | 24 +++++++++++++++++-------
>  arch/x86/kvm/x86.c              | 23 +++++++++++++++++------
>  arch/x86/kvm/x86.h              |  2 +-
>  5 files changed, 59 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 09155d6..da34027 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -598,6 +598,12 @@ struct kvm_vcpu_stat {
>  
>  struct x86_instruction_info;
>  
> +struct msr_data {
> +    bool host_initiated;
> +    u32 index;
> +    u64 data;
> +};
> +
>  struct kvm_x86_ops {
>  	int (*cpu_has_kvm_support)(void);          /* __init */
>  	int (*disabled_by_bios)(void);             /* __init */
> @@ -621,7 +627,7 @@ struct kvm_x86_ops {
>  	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
>  				struct kvm_guest_debug *dbg);
>  	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
> -	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
> +	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
>  	void (*get_segment)(struct kvm_vcpu *vcpu,
>  			    struct kvm_segment *var, int seg);
> @@ -772,7 +778,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
>  
>  void kvm_enable_efer_bits(u64);
>  int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
> -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
> +int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
>  struct x86_emulate_ctxt;
>  
> @@ -799,7 +805,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
>  int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
>  
>  int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
> -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
> +int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
>  unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
>  void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index baead95..5ac11f0 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1211,6 +1211,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
>  	struct page *msrpm_pages;
>  	struct page *hsave_page;
>  	struct page *nested_msrpm_pages;
> +	struct msr_data msr;
>  	int err;
>  
>  	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
> @@ -1255,7 +1256,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
>  	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
>  	svm->asid_generation = 0;
>  	init_vmcb(svm);
> -	kvm_write_tsc(&svm->vcpu, 0);
> +	msr.data = 0x0;
> +	msr.index = MSR_IA32_TSC;
> +	msr.host_initiated = true;
> +	kvm_write_tsc(&svm->vcpu, &msr);
>  
>  	err = fx_init(&svm->vcpu);
>  	if (err)
> @@ -3147,13 +3151,15 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
>  	return 0;
>  }
>  
> -static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
> +static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  
> +	u32 ecx = msr->index;
> +	u64 data = msr->data;
>  	switch (ecx) {
>  	case MSR_IA32_TSC:
> -		kvm_write_tsc(vcpu, data);
> +		kvm_write_tsc(vcpu, msr);
>  		break;
>  	case MSR_STAR:
>  		svm->vmcb->save.star = data;
> @@ -3208,20 +3214,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
>  		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>  		break;
>  	default:
> -		return kvm_set_msr_common(vcpu, ecx, data);
> +		return kvm_set_msr_common(vcpu, msr);
>  	}
>  	return 0;
>  }
>  
>  static int wrmsr_interception(struct vcpu_svm *svm)
>  {
> +	struct msr_data msr;
>  	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
>  	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
>  		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
>  
> -
> +	msr.data = data;
> +	msr.index = ecx;
> +	msr.host_initiated = false;
>  	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
> -	if (svm_set_msr(&svm->vcpu, ecx, data)) {
> +	if (svm_set_msr(&svm->vcpu, &msr)) {
>  		trace_kvm_msr_write_ex(ecx, data);
>  		kvm_inject_gp(&svm->vcpu, 0);
>  	} else {
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c00f03d..819970f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2197,15 +2197,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
>   * Returns 0 on success, non-0 otherwise.
>   * Assumes vcpu_load() was already called.
>   */
> -static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> +static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	struct shared_msr_entry *msr;
>  	int ret = 0;
> +	u32 msr_index = msr_info->index;
> +	u64 data = msr_info->data;
>  
>  	switch (msr_index) {
>  	case MSR_EFER:
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_info);
>  		break;
>  #ifdef CONFIG_X86_64
>  	case MSR_FS_BASE:
> @@ -2231,7 +2233,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  		vmcs_writel(GUEST_SYSENTER_ESP, data);
>  		break;
>  	case MSR_IA32_TSC:
> -		kvm_write_tsc(vcpu, data);
> +		kvm_write_tsc(vcpu, msr_info);
>  		break;
>  	case MSR_IA32_CR_PAT:
>  		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
> @@ -2239,7 +2241,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  			vcpu->arch.pat = data;
>  			break;
>  		}
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_info);
>  		break;
>  	case MSR_TSC_AUX:
>  		if (!vmx->rdtscp_enabled)
> @@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  			}
>  			break;
>  		}
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_info);
>  	}
>  
>  	return ret;
> @@ -3835,6 +3837,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  	unsigned long a;
>  #endif
>  	int i;
> +	struct msr_data msr;
>  
>  	/* I/O */
>  	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
> @@ -3918,7 +3921,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
>  	set_cr4_guest_host_mask(vmx);
>  
> -	kvm_write_tsc(&vmx->vcpu, 0);
> +	msr.data = 0x0;
> +	msr.index = MSR_IA32_TSC;
> +	msr.host_initiated = true;
> +	kvm_write_tsc(&vmx->vcpu, &msr);
>  
>  	return 0;
>  }
> @@ -4649,11 +4655,15 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
>  
>  static int handle_wrmsr(struct kvm_vcpu *vcpu)
>  {
> +	struct msr_data msr;
>  	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
>  	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
>  		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
>  
> -	if (vmx_set_msr(vcpu, ecx, data) != 0) {
> +	msr.data = data;
> +	msr.index = ecx;
> +	msr.host_initiated = false;
> +	if (vmx_set_msr(vcpu, &msr) != 0) {
>  		trace_kvm_msr_write_ex(ecx, data);
>  		kvm_inject_gp(vcpu, 0);
>  		return 1;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 42bce48..a3aac1c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -883,9 +883,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
>   * Returns 0 on success, non-0 otherwise.
>   * Assumes vcpu_load() was already called.
>   */
> -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> +int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  {
> -	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
> +	return kvm_x86_ops->set_msr(vcpu, msr);
>  }
>  
>  /*
> @@ -893,7 +893,11 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>   */
>  static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
>  {
> -	return kvm_set_msr(vcpu, index, *data);
> +	struct msr_data msr;
> +	msr.data = *data;
> +	msr.index = index;
> +	msr.host_initiated = true;
> +	return kvm_set_msr(vcpu, &msr);
>  }
>  
>  static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
> @@ -1043,12 +1047,13 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
>  	return tsc;
>  }
>  
> -void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> +void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  {
>  	struct kvm *kvm = vcpu->kvm;
>  	u64 offset, ns, elapsed;
>  	unsigned long flags;
>  	s64 usdiff;
> +	u64 data = msr->data;
>  
>  	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
>  	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
> @@ -1561,9 +1566,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  }
>  
> -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> +int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
>  	bool pr = false;
> +	u32 msr = msr_info->index;
> +	u64 data = msr_info->data;
>  
>  	switch (msr) {
>  	case MSR_EFER:
> @@ -4286,7 +4293,11 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
>  static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
>  			    u32 msr_index, u64 data)
>  {
> -	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
> +	struct msr_data msr;
> +	msr.data = data;
> +	msr.index = msr_index;
> +	msr.host_initiated = true;
> +	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
>  }
>  
>  static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 3d1134d..1dcb8e3 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
>  void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
>  int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
>  
> -void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
> +void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
>  int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
>  	gva_t addr, void *val, unsigned int bytes,
> -- 
> 1.8.0.rc0
> 
> 
> 

--
			Gleb.


