* [PATCH 4/5] KVM: nVMX: fix limit checking: memory operand size varies for different VMX instructions
@ 2015-08-20 19:37 Eugene Korenevsky
2015-09-07 11:40 ` Paolo Bonzini
0 siblings, 1 reply; 2+ messages in thread
From: Eugene Korenevsky @ 2015-08-20 19:37 UTC (permalink / raw)
To: kvm; +Cc: Paolo Bonzini
When checking limits for VMX opcodes in protected mode, different sizes of
memory operands must be taken into account.
For VMREAD and VMWRITE instructions, memory operand size is 32 or 64 bits
depending on CPU mode. For VMXON, VMCLEAR, VMPTRST, VMPTRLD instructions,
memory operand size is 64 bits. For INVEPT instruction, memory operand size
is 128 bits.
Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
---
arch/x86/kvm/vmx.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4a4d677..f39e24f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6399,7 +6399,8 @@ static int vmx_protmode_seg_check(struct kvm_vcpu *vcpu,
*/
static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
unsigned long exit_qualification,
- u32 vmx_instruction_info, bool wr, gva_t *ret)
+ u32 vmx_instruction_info,
+ bool wr, int mem_op_size, gva_t *ret)
{
gva_t off;
struct kvm_segment s;
@@ -6466,7 +6467,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+ vmcs_read32(VMX_INSTRUCTION_INFO), false, 8, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
@@ -6971,6 +6972,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t gva = 0;
+ int mem_op_size;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
@@ -6993,12 +6995,13 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
field_value);
} else {
+ mem_op_size = is_long_mode(vcpu) ? 8 : 4;
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, true, &gva))
+ vmx_instruction_info, true, mem_op_size, &gva))
return 1;
/* _system ok, as nested_vmx_check_permission verified cpl=0 */
kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
- &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+ &field_value, mem_op_size, NULL);
}
nested_vmx_succeed(vcpu);
@@ -7021,6 +7024,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
*/
u64 field_value = 0;
struct x86_exception e;
+ int mem_op_size;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
@@ -7030,11 +7034,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
field_value = kvm_register_readl(vcpu,
(((vmx_instruction_info) >> 3) & 0xf));
else {
+ mem_op_size = is_64_bit_mode(vcpu) ? 8 : 4;
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, false, &gva))
+ vmx_instruction_info, false, mem_op_size, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
- &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+ &field_value, mem_op_size, &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7123,7 +7128,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
return 1;
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, true, &vmcs_gva))
+ vmx_instruction_info, true, 8, &vmcs_gva))
return 1;
/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
@@ -7179,7 +7184,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
* operand is read even if it isn't needed (e.g., for type==global)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, false, &gva))
+ vmx_instruction_info, false, 16, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
sizeof(operand), &e)) {
--
2.1.4
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH 4/5] KVM: nVMX: fix limit checking: memory operand size varies for different VMX instructions
2015-08-20 19:37 [PATCH 4/5] KVM: nVMX: fix limit checking: memory operand size varies for different VMX instructions Eugene Korenevsky
@ 2015-09-07 11:40 ` Paolo Bonzini
0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2015-09-07 11:40 UTC (permalink / raw)
To: Eugene Korenevsky, kvm
On 20/08/2015 21:37, Eugene Korenevsky wrote:
> When checking limits for VMX opcodes in protected mode, different sizes of
> memory operands must be taken into account.
> For VMREAD and VMWRITE instructions, memory operand size is 32 or 64 bits
> depending on CPU mode. For VMXON, VMCLEAR, VMPTRST, VMPTRLD instructions,
> memory operand size is 64 bits. For INVEPT instruction, memory operand size
> is 128 bits.
>
> Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
Looks good, but please provide unit tests in kvm-unit-tests too.
Paolo
> ---
> arch/x86/kvm/vmx.c | 21 +++++++++++++--------
> 1 file changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 4a4d677..f39e24f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -6399,7 +6399,8 @@ static int vmx_protmode_seg_check(struct kvm_vcpu *vcpu,
> */
> static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
> unsigned long exit_qualification,
> - u32 vmx_instruction_info, bool wr, gva_t *ret)
> + u32 vmx_instruction_info,
> + bool wr, int mem_op_size, gva_t *ret)
> {
> gva_t off;
> struct kvm_segment s;
> @@ -6466,7 +6467,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
> int maxphyaddr = cpuid_maxphyaddr(vcpu);
>
> if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
> - vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
> + vmcs_read32(VMX_INSTRUCTION_INFO), false, 8, &gva))
> return 1;
>
> if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
> @@ -6971,6 +6972,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
> unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> gva_t gva = 0;
> + int mem_op_size;
>
> if (!nested_vmx_check_permission(vcpu) ||
> !nested_vmx_check_vmcs12(vcpu))
> @@ -6993,12 +6995,13 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
> kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
> field_value);
> } else {
> + mem_op_size = is_long_mode(vcpu) ? 8 : 4;
> if (get_vmx_mem_address(vcpu, exit_qualification,
> - vmx_instruction_info, true, &gva))
> + vmx_instruction_info, true, mem_op_size, &gva))
> return 1;
> /* _system ok, as nested_vmx_check_permission verified cpl=0 */
> kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
> - &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
> + &field_value, mem_op_size, NULL);
> }
>
> nested_vmx_succeed(vcpu);
> @@ -7021,6 +7024,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
> */
> u64 field_value = 0;
> struct x86_exception e;
> + int mem_op_size;
>
> if (!nested_vmx_check_permission(vcpu) ||
> !nested_vmx_check_vmcs12(vcpu))
> @@ -7030,11 +7034,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
> field_value = kvm_register_readl(vcpu,
> (((vmx_instruction_info) >> 3) & 0xf));
> else {
> + mem_op_size = is_64_bit_mode(vcpu) ? 8 : 4;
> if (get_vmx_mem_address(vcpu, exit_qualification,
> - vmx_instruction_info, false, &gva))
> + vmx_instruction_info, false, mem_op_size, &gva))
> return 1;
> if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
> - &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
> + &field_value, mem_op_size, &e)) {
> kvm_inject_page_fault(vcpu, &e);
> return 1;
> }
> @@ -7123,7 +7128,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
> return 1;
>
> if (get_vmx_mem_address(vcpu, exit_qualification,
> - vmx_instruction_info, true, &vmcs_gva))
> + vmx_instruction_info, true, 8, &vmcs_gva))
> return 1;
> /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
> if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
> @@ -7179,7 +7184,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
> * operand is read even if it isn't needed (e.g., for type==global)
> */
> if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
> - vmx_instruction_info, false, &gva))
> + vmx_instruction_info, false, 16, &gva))
> return 1;
> if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
> sizeof(operand), &e)) {
>
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2015-09-07 11:40 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-08-20 19:37 [PATCH 4/5] KVM: nVMX: fix limit checking: memory operand size varies for different VMX instructions Eugene Korenevsky
2015-09-07 11:40 ` Paolo Bonzini
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).