From: Joerg Roedel <joerg.roedel@amd.com>
To: Alexander Graf <agraf@suse.de>
Cc: kvm@vger.kernel.org, joro@8bytes.org, anthony@codemonkey.ws,
avi@qumranet.com
Subject: Re: [PATCH 7/9] Add VMRUN handler v3
Date: Fri, 19 Sep 2008 17:59:26 +0200 [thread overview]
Message-ID: <20080919155926.GR24392@amd.com> (raw)
In-Reply-To: <1221658886-14109-8-git-send-email-agraf@suse.de>
On Wed, Sep 17, 2008 at 03:41:24PM +0200, Alexander Graf wrote:
> This patch implements VMRUN. VMRUN enters a virtual CPU and runs that
> in the same context as the normal guest CPU would run.
> So basically it is implemented the same way, a normal CPU would do it.
>
> We also prepare all intercepts that get OR'ed with the original
> intercepts, as we do not allow a level 2 guest to be intercepted less
> than the first level guest.
>
> v2 implements the following improvements:
>
> - fixes the CPL check
> - does not allocate iopm when not used
> - remembers the host's IF in the HIF bit in the hflags
>
> v3:
>
> - make use of the new permission checking
> - add support for V_INTR_MASKING_MASK
>
> Signed-off-by: Alexander Graf <agraf@suse.de>
> ---
> arch/x86/kvm/kvm_svm.h | 9 ++
> arch/x86/kvm/svm.c | 198 +++++++++++++++++++++++++++++++++++++++++++-
> include/asm-x86/kvm_host.h | 2 +
> 3 files changed, 207 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
> index 76ad107..2afe0ce 100644
> --- a/arch/x86/kvm/kvm_svm.h
> +++ b/arch/x86/kvm/kvm_svm.h
> @@ -43,6 +43,15 @@ struct vcpu_svm {
> u32 *msrpm;
>
> u64 nested_hsave;
> + u64 nested_vmcb;
> +
> + /* These are the merged vectors */
> + u32 *nested_msrpm;
> + u32 *nested_iopm;
> +
> + /* gpa pointers to the real vectors */
> + u64 nested_vmcb_msrpm;
> + u64 nested_vmcb_iopm;
> };
>
> #endif
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 0aa22e5..3601e75 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -51,6 +51,9 @@ MODULE_LICENSE("GPL");
> /* Turn on to get debugging output*/
> /* #define NESTED_DEBUG */
>
> +/* Not needed until device passthrough */
> +/* #define NESTED_KVM_MERGE_IOPM */
> +
> #ifdef NESTED_DEBUG
> #define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
> #else
> @@ -76,6 +79,11 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
> return container_of(vcpu, struct vcpu_svm, vcpu);
> }
>
> +static inline bool is_nested(struct vcpu_svm *svm)
> +{
> + return svm->nested_vmcb;
> +}
> +
> static unsigned long iopm_base;
>
> struct kvm_ldttss_desc {
> @@ -614,6 +622,7 @@ static void init_vmcb(struct vcpu_svm *svm)
> force_new_asid(&svm->vcpu);
>
> svm->nested_hsave = 0;
> + svm->nested_vmcb = 0;
> svm->vcpu.arch.hflags = HF_GIF_MASK;
> }
>
> @@ -639,6 +648,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
> struct vcpu_svm *svm;
> struct page *page;
> struct page *msrpm_pages;
> + struct page *nested_msrpm_pages;
> +#ifdef NESTED_KVM_MERGE_IOPM
> + struct page *nested_iopm_pages;
> +#endif
> int err;
>
> svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
> @@ -661,9 +674,25 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
> msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
> if (!msrpm_pages)
> goto uninit;
> +
> + nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
> + if (!nested_msrpm_pages)
> + goto uninit;
> +
> +#ifdef NESTED_KVM_MERGE_IOPM
> + nested_iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
> + if (!nested_iopm_pages)
> + goto uninit;
> +#endif
> +
> svm->msrpm = page_address(msrpm_pages);
> svm_vcpu_init_msrpm(svm->msrpm);
>
> + svm->nested_msrpm = page_address(nested_msrpm_pages);
> +#ifdef NESTED_KVM_MERGE_IOPM
> + svm->nested_iopm = page_address(nested_iopm_pages);
> +#endif
> +
> svm->vmcb = page_address(page);
> clear_page(svm->vmcb);
> svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
> @@ -693,6 +722,10 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
>
> __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
> __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
> + __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
> +#ifdef NESTED_KVM_MERGE_IOPM
> + __free_pages(virt_to_page(svm->nested_iopm), IOPM_ALLOC_ORDER);
> +#endif
> kvm_vcpu_uninit(vcpu);
> kmem_cache_free(kvm_vcpu_cache, svm);
> }
> @@ -1230,6 +1263,138 @@ static int nested_svm_do(struct vcpu_svm *svm,
> return retval;
> }
>
> +
> +static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
> + void *arg2, void *opaque)
> +{
> + int i;
> + u32 *nested_msrpm = (u32*)arg1;
> + for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
> + svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
> + svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
> +
> + return 0;
> +}
> +
> +#ifdef NESTED_KVM_MERGE_IOPM
> +static int nested_svm_vmrun_iopm(struct vcpu_svm *svm, void *arg1,
> + void *arg2, void *opaque)
> +{
> + int i;
> + u32 *nested_iopm = (u32*)arg1;
> + u32 *iopm = (u32*)__va(iopm_base);
> + for (i=0; i< PAGE_SIZE * (1 << IOPM_ALLOC_ORDER) / 4; i++)
> + svm->nested_iopm[i] = iopm[i] | nested_iopm[i];
> + svm->vmcb->control.iopm_base_pa = __pa(svm->nested_iopm);
> +
> + return 0;
> +}
> +#endif
> +
> +static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
> + void *arg2, void *opaque)
> +{
> + struct vmcb *nested_vmcb = (struct vmcb *)arg1;
> + struct vmcb *hsave = (struct vmcb *)arg2;
> +
> + /* nested_vmcb is our indicator if nested SVM is activated */
> + svm->nested_vmcb = svm->vmcb->save.rax;
> +
> + /* Clear internal status */
> + svm->vcpu.arch.exception.pending = false;
> +
> + /* Save the old vmcb, so we don't need to pick what we save, but
> + can restore everything when a VMEXIT occurs */
> + memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
This is a big security hole. With this we give the guest access to its
own VMCB. The guest can take over or crash the whole host machine by
rewriting its VMCB. We should be more selective about what we save in
the hsave area.
Joerg
--
| AMD Saxony Limited Liability Company & Co. KG
Operating | Wilschdorfer Landstr. 101, 01109 Dresden, Germany
System | Register Court Dresden: HRA 4896
Research | General Partner authorized to represent:
Center | AMD Saxony LLC (Wilmington, Delaware, US)
| General Manager of AMD Saxony LLC: Dr. Hans-R. Deppe, Thomas McCoy
next prev parent reply other threads:[~2008-09-19 15:59 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-09-17 13:41 [PATCH 0/9] Add support for nested SVM (kernel) v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 1/9] Add CPUID feature flag for SVM v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 2/9] Clean up VINTR setting v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 3/9] Add helper functions for nested SVM v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 4/9] Implement GIF, clgi and stgi v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 5/9] Implement hsave v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 6/9] Add VMLOAD and VMSAVE handlers v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 7/9] Add VMRUN handler v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 8/9] Add VMEXIT handler and intercepts v3 Alexander Graf
2008-09-17 13:41 ` [PATCH 9/9] Allow setting the SVME bit v3 Alexander Graf
2008-09-19 15:59 ` Joerg Roedel [this message]
2008-09-25 17:32 ` [PATCH 7/9] Add VMRUN handler v3 Alexander Graf
2008-09-25 17:37 ` Joerg Roedel
2008-09-25 20:00 ` Alexander Graf
2008-09-25 21:22 ` joro
2008-09-27 12:59 ` Avi Kivity
2008-09-27 12:58 ` Avi Kivity
2008-09-25 18:47 ` [PATCH 4/9] Implement GIF, clgi and stgi v3 Joerg Roedel
2008-09-25 19:55 ` Alexander Graf
2008-09-25 21:27 ` Joerg Roedel
2008-09-26 9:01 ` Alexander Graf
2008-09-27 12:55 ` Avi Kivity
2008-09-27 12:52 ` Avi Kivity
2008-09-19 14:36 ` [PATCH 0/9] Add support for nested SVM (kernel) v3 Joerg Roedel
2008-09-19 14:39 ` Joerg Roedel
2008-09-19 15:56 ` Joerg Roedel
2008-10-15 17:07 ` Alexander Graf
2008-09-19 21:48 ` First performance numbers Joerg Roedel
2008-09-20 1:30 ` Avi Kivity
2008-09-20 6:55 ` Joerg Roedel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20080919155926.GR24392@amd.com \
--to=joerg.roedel@amd.com \
--cc=agraf@suse.de \
--cc=anthony@codemonkey.ws \
--cc=avi@qumranet.com \
--cc=joro@8bytes.org \
--cc=kvm@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).