From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: "Kevin Tian" <kevin.tian@intel.com>,
"Wei Liu" <wei.liu2@citrix.com>,
"Jan Beulich" <JBeulich@suse.com>,
"Andrew Cooper" <andrew.cooper3@citrix.com>,
"Jun Nakajima" <jun.nakajima@intel.com>,
"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH 7/9] x86/vmx: Support load-only guest MSR list entries
Date: Tue, 22 May 2018 12:20:44 +0100 [thread overview]
Message-ID: <1526988046-22948-8-git-send-email-andrew.cooper3@citrix.com> (raw)
In-Reply-To: <1526988046-22948-1-git-send-email-andrew.cooper3@citrix.com>
Currently, the VMX_MSR_GUEST type maintains completely symmetric guest load
and save lists, by pointing VM_EXIT_MSR_STORE_ADDR and VM_ENTRY_MSR_LOAD_ADDR
at the same page, and setting VM_EXIT_MSR_STORE_COUNT and
VM_ENTRY_MSR_LOAD_COUNT to the same value.
However, for MSRs which we won't let the guest have direct access to, having
hardware save the current value on VMExit is unnecessary overhead.
To avoid this overhead, we must make the load and save lists asymmetric. By
making the entry load count greater than the exit store count, we can maintain
two adjacent lists of MSRs, the first of which is saved and restored, and the
second of which is only restored on VMEntry.
For simplicity:
* Both adjacent lists are still sorted by MSR index.
* It is undefined behaviour to insert the same MSR into both lists.
* The total size of both lists is still limited to 256 entries (one 4k page).
Split the current msr_count field into msr_{load,save}_count, and introduce a
new VMX_MSR_GUEST_LOADONLY type, and update vmx_{add,find}_msr() to calculate
which sublist to search, based on type. VMX_MSR_HOST has no logical sublist,
whereas VMX_MSR_GUEST has a sublist between 0 and the save count, while
VMX_MSR_GUEST_LOADONLY has a sublist between the save count and the load
count.
One subtle point is that inserting an MSR into the load-save list involves
moving the entire load-only list, and updating both counts.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
---
xen/arch/x86/hvm/vmx/vmcs.c | 46 +++++++++++++++++++++++++++++---------
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/include/asm-x86/hvm/vmx/vmcs.h | 4 +++-
3 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index b75cc90..7bf19a0 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1313,7 +1313,7 @@ struct vmx_msr_entry *vmx_find_msr(struct vcpu *v, uint32_t msr,
{
struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry *start = NULL, *ent, *end;
- unsigned int total;
+ unsigned int substart, subend, total;
ASSERT(v == current || !vcpu_runnable(v));
@@ -1321,12 +1321,23 @@ struct vmx_msr_entry *vmx_find_msr(struct vcpu *v, uint32_t msr,
{
case VMX_MSR_HOST:
start = arch_vmx->host_msr_area;
- total = arch_vmx->host_msr_count;
+ substart = 0;
+ subend = arch_vmx->host_msr_count;
+ total = subend;
break;
case VMX_MSR_GUEST:
start = arch_vmx->msr_area;
- total = arch_vmx->msr_count;
+ substart = 0;
+ subend = arch_vmx->msr_save_count;
+ total = arch_vmx->msr_load_count;
+ break;
+
+ case VMX_MSR_GUEST_LOADONLY:
+ start = arch_vmx->msr_area;
+ substart = arch_vmx->msr_save_count;
+ subend = arch_vmx->msr_load_count;
+ total = subend;
break;
default:
@@ -1337,7 +1348,7 @@ struct vmx_msr_entry *vmx_find_msr(struct vcpu *v, uint32_t msr,
return NULL;
end = start + total;
- ent = locate_msr_entry(start, end, msr);
+ ent = locate_msr_entry(start + substart, start + subend, msr);
return ((ent < end) && (ent->index == msr)) ? ent : NULL;
}
@@ -1347,7 +1358,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
{
struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
- unsigned int total;
+ unsigned int substart, subend, total;
int rc;
ASSERT(v == current || !vcpu_runnable(v));
@@ -1356,12 +1367,23 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
{
case VMX_MSR_HOST:
ptr = &arch_vmx->host_msr_area;
- total = arch_vmx->host_msr_count;
+ substart = 0;
+ subend = arch_vmx->host_msr_count;
+ total = subend;
break;
case VMX_MSR_GUEST:
ptr = &arch_vmx->msr_area;
- total = arch_vmx->msr_count;
+ substart = 0;
+ subend = arch_vmx->msr_save_count;
+ total = arch_vmx->msr_load_count;
+ break;
+
+ case VMX_MSR_GUEST_LOADONLY:
+ ptr = &arch_vmx->msr_area;
+ substart = arch_vmx->msr_save_count;
+ subend = arch_vmx->msr_load_count;
+ total = subend;
break;
default:
@@ -1391,6 +1413,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
break;
case VMX_MSR_GUEST:
+ case VMX_MSR_GUEST_LOADONLY:
__vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
__vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
break;
@@ -1399,7 +1422,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
start = *ptr;
end = start + total;
- ent = locate_msr_entry(start, end, msr);
+ ent = locate_msr_entry(start + substart, start + subend, msr);
if ( (ent < end) && (ent->index == msr) )
goto found;
@@ -1423,8 +1446,11 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
break;
case VMX_MSR_GUEST:
- __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++arch_vmx->msr_count);
- __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, arch_vmx->msr_count);
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++arch_vmx->msr_save_count);
+
+ /* Fallthrough */
+ case VMX_MSR_GUEST_LOADONLY:
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, ++arch_vmx->msr_load_count);
break;
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 1783cd8..26e4206 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4160,7 +4160,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
static void lbr_tsx_fixup(void)
{
struct vcpu *curr = current;
- unsigned int msr_count = curr->arch.hvm_vmx.msr_count;
+ unsigned int msr_count = curr->arch.hvm_vmx.msr_save_count;
struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
struct vmx_msr_entry *msr;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index accd6fb..b0fccd2 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -139,7 +139,8 @@ struct arch_vmx_struct {
*/
struct vmx_msr_entry *msr_area;
struct vmx_msr_entry *host_msr_area;
- unsigned int msr_count;
+ unsigned int msr_load_count;
+ unsigned int msr_save_count;
unsigned int host_msr_count;
unsigned long eoi_exitmap_changed;
@@ -542,6 +543,7 @@ enum vmx_insn_errno
enum vmx_msr_list_type {
VMX_MSR_HOST,
VMX_MSR_GUEST,
+ VMX_MSR_GUEST_LOADONLY,
};
int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2018-05-22 11:20 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-05-22 11:20 [PATCH 0/9] x86/vmx: Don't leak EFER.NXE into guest context Andrew Cooper
2018-05-22 11:20 ` [PATCH 1/9] x86/vmx: API improvements for MSR load/save infrastructure Andrew Cooper
2018-05-23 16:01 ` Roger Pau Monné
2018-05-23 17:02 ` Andrew Cooper
2018-05-27 3:26 ` Tian, Kevin
2018-05-22 11:20 ` [PATCH 2/9] x86/vmx: Internal cleanup " Andrew Cooper
2018-05-23 16:28 ` Roger Pau Monné
2018-05-23 16:54 ` Andrew Cooper
2018-05-24 14:45 ` Jan Beulich
2018-05-27 3:30 ` Tian, Kevin
2018-05-22 11:20 ` [PATCH 3/9] x86/vmx: Factor locate_msr_entry() out of vmx_find_msr() and vmx_add_msr() Andrew Cooper
2018-05-23 16:39 ` Roger Pau Monné
2018-05-23 16:55 ` Andrew Cooper
2018-05-24 10:53 ` Roger Pau Monné
2018-05-24 10:59 ` Andrew Cooper
2018-05-24 12:16 ` Roger Pau Monné
2018-05-27 3:38 ` Tian, Kevin
2018-05-22 11:20 ` [PATCH 4/9] x86/vmx: Support remote access to the MSR lists Andrew Cooper
2018-05-24 11:50 ` Roger Pau Monné
2018-05-24 12:03 ` Andrew Cooper
2018-05-24 14:53 ` Jan Beulich
2018-05-27 3:47 ` Tian, Kevin
2018-05-28 15:15 ` Andrew Cooper
2018-05-22 11:20 ` [PATCH 5/9] x86/vmx: Fix handing of MSR_DEBUGCTL on VMExit Andrew Cooper
2018-05-22 12:53 ` Andrew Cooper
2018-05-24 12:14 ` Roger Pau Monné
2018-05-24 12:39 ` Andrew Cooper
2018-05-24 13:53 ` Jan Beulich
2018-05-24 15:08 ` Jan Beulich
2018-05-24 15:51 ` Andrew Cooper
2018-05-27 3:56 ` Tian, Kevin
2018-05-28 15:30 ` Andrew Cooper
2018-05-22 11:20 ` [PATCH 6/9] x86/vmx: Pass an MSR value into vmx_msr_add() Andrew Cooper
2018-05-24 15:12 ` Jan Beulich
2018-05-30 18:09 ` Andrew Cooper
2018-05-22 11:20 ` Andrew Cooper [this message]
2018-05-24 15:19 ` [PATCH 7/9] x86/vmx: Support load-only guest MSR list entries Jan Beulich
2018-05-24 15:37 ` Roger Pau Monné
2018-05-22 11:20 ` [PATCH 8/9] x86/vmx: Support removing MSRs from the host/guest load/save lists Andrew Cooper
2018-05-24 15:42 ` Roger Pau Monné
2018-05-24 15:45 ` Andrew Cooper
2018-05-22 11:20 ` [PATCH 9/9] x86/vmx: Don't leak EFER.NXE into guest context Andrew Cooper
2018-05-24 16:01 ` Roger Pau Monné
2018-05-24 16:48 ` Andrew Cooper
2018-05-25 7:27 ` Jan Beulich
2018-05-25 8:03 ` Andrew Cooper
2018-05-25 6:23 ` Tim Deegan
2018-05-25 7:49 ` Jan Beulich
2018-05-25 8:36 ` Andrew Cooper
2018-05-25 11:36 ` Jan Beulich
2018-05-25 11:48 ` Andrew Cooper
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1526988046-22948-8-git-send-email-andrew.cooper3@citrix.com \
--to=andrew.cooper3@citrix.com \
--cc=JBeulich@suse.com \
--cc=jun.nakajima@intel.com \
--cc=kevin.tian@intel.com \
--cc=roger.pau@citrix.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).