From: Paolo Bonzini <pbonzini@redhat.com>
To: Lara Lazier <laramglazier@gmail.com>, qemu-devel@nongnu.org
Subject: Re: [PATCH] target/i386: VMRUN and VMLOAD canonicalizations
Date: Fri, 6 Aug 2021 16:06:02 +0200
Message-ID: <75d0eb3d-839d-0707-3e6e-0a49557b5bfa@redhat.com>
In-Reply-To: <20210804113058.45186-1-laramglazier@gmail.com>
On 04/08/21 13:30, Lara Lazier wrote:
> APM2 requires that VMRUN and VMLOAD canonicalize (i.e. sign-extend to
> bit 63 from bit 47 or 56, depending on the 48-bit or 57-bit virtual
> address width) all base addresses in the segment registers that the
> respective instruction loads.
>
> Signed-off-by: Lara Lazier <laramglazier@gmail.com>
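To spell out the effect with a made-up value: under a 48-bit virtual
address width, bits 63:48 of the loaded base must become copies of bit 47,
so a base read from the VMCB as 0x0000800000000000 has to end up in the
segment register as 0xffff800000000000 (and likewise replicated from bit 56
when LA57 is in use).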
> ---
> target/i386/cpu.c | 19 +++++++++++--------
> target/i386/cpu.h | 2 ++
> target/i386/tcg/sysemu/svm_helper.c | 27 +++++++++++++++++----------
> 3 files changed, 30 insertions(+), 18 deletions(-)
>
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index 71d26cf1bd..de4c8316c9 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -5108,6 +5108,15 @@ static void x86_register_cpudef_types(const X86CPUDefinition *def)
>
> }
>
> +uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
> +{
> + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
> + return 57; /* 57 bits virtual */
> + } else {
> + return 48; /* 48 bits virtual */
> + }
> +}
> +
> void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> uint32_t *eax, uint32_t *ebx,
> uint32_t *ecx, uint32_t *edx)
> @@ -5510,16 +5519,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> break;
> case 0x80000008:
> /* virtual & phys address size in low 2 bytes. */
> + *eax = cpu->phys_bits;
> if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
> /* 64 bit processor */
> - *eax = cpu->phys_bits; /* configurable physical bits */
> - if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
> - *eax |= 0x00003900; /* 57 bits virtual */
> - } else {
> - *eax |= 0x00003000; /* 48 bits virtual */
> - }
> - } else {
> - *eax = cpu->phys_bits;
> + *eax |= (cpu_x86_virtual_addr_width(env) << 8);
> }
> *ebx = env->features[FEAT_8000_0008_EBX];
> if (cs->nr_cores * cs->nr_threads > 1) {
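As an aside, leaf 0x80000008 packs the physical address width into
EAX[7:0] and the linear address width into EAX[15:8], so the refactored
code yields exactly the same values as before. A tiny standalone
illustration of the packing, with made-up widths (not QEMU code):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Made-up widths, only to show the EAX layout of CPUID 0x80000008:
     * physical address bits in EAX[7:0], linear address bits in EAX[15:8]. */
    uint32_t phys_bits = 40;
    uint32_t virt_bits = 48;   /* 57 when LA57 is enabled */
    uint32_t eax = phys_bits | (virt_bits << 8);

    printf("0x%04" PRIx32 "\n", eax);   /* prints 0x3028 */
    return 0;
}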
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index 6c50d3ab4f..c9c7350c76 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -1954,6 +1954,8 @@ typedef struct PropValue {
> } PropValue;
> void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
>
> +uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
> +
> /* cpu.c other functions (cpuid) */
> void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> uint32_t *eax, uint32_t *ebx,
> diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
> index 6c29a6a778..032561ef8c 100644
> --- a/target/i386/tcg/sysemu/svm_helper.c
> +++ b/target/i386/tcg/sysemu/svm_helper.c
> @@ -41,6 +41,16 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
> ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
> }
>
> +/*
> + * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
> + * addresses in the segment registers that have been loaded.
> + */
> +static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
> +{
> + uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
> + *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
> +}
> +
> static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
> SegmentCache *sc)
> {
> @@ -53,6 +63,7 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
> sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
> flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
> sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
> + svm_canonicalization(env, &sc->base);
> }
>
> static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
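For readers wondering about the shift pair in svm_canonicalization(): it
is the usual sign-extension idiom, moving the top implemented bit up to
bit 63 and then arithmetic-shifting it back down so it is replicated into
the upper bits. A self-contained sketch of the same computation (names
made up; it uses int64_t rather than the patch's long cast just to make
the 64-bit intent explicit):

#include <inttypes.h>
#include <stdio.h>

/* Sign-extend an address from a 48- or 57-bit virtual address width
 * to a canonical 64-bit address. */
static uint64_t canonicalize(uint64_t addr, unsigned va_bits)
{
    unsigned shift = 64 - va_bits;
    /* Shift the top implemented address bit up to bit 63, then
     * arithmetic-shift it back so it fills the upper bits. */
    return (uint64_t)((int64_t)(addr << shift) >> shift);
}

int main(void)
{
    /* Bit 47 set, upper bits clear: non-canonical for a 48-bit width. */
    uint64_t base = 0x0000800000000000ULL;

    printf("%016" PRIx64 "\n", canonicalize(base, 48));  /* ffff800000000000 */
    printf("%016" PRIx64 "\n", canonicalize(base, 57));  /* 0000800000000000 */
    return 0;
}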
> @@ -256,16 +267,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
> env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
> offsetof(struct vmcb, control.tsc_offset));
>
> - env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.gdtr.base));
> - env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.gdtr.limit));
> -
> - env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.idtr.base));
> - env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.idtr.limit));
> -
> new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
> if (new_cr0 & SVM_CR0_RESERVED_MASK) {
> cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
> @@ -319,6 +320,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
> R_SS);
> svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
> R_DS);
> + svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
> + &env->idt);
> + svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
> + &env->gdt);
>
> env->eip = x86_ldq_phys(cs,
> env->vm_vmcb + offsetof(struct vmcb, save.rip));
> @@ -456,6 +461,7 @@ void helper_vmload(CPUX86State *env, int aflag)
> env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
> env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
> env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
> + svm_canonicalization(env, &env->kernelgsbase);
> #endif
> env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
> env->sysenter_cs = x86_ldq_phys(cs,
> @@ -464,6 +470,7 @@ void helper_vmload(CPUX86State *env, int aflag)
> save.sysenter_esp));
> env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
> save.sysenter_eip));
> +
> }
>
> void helper_vmsave(CPUX86State *env, int aflag)
>
Queued, thanks.
Paolo