From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: Re: [PATCH 5/5] x86/HVM: cache emulated instruction for retry processing
Date: Thu, 10 Oct 2013 12:35:31 +0100
To: xen-devel@lists.xen.org, Jan Beulich <jbeulich@suse.com>
On 30/09/13 13:59, Jan Beulich wrote:
Rather than re-reading the instruction bytes upon retry processing,
stash away and re-use what we already read. That way we can be
certain that the retry won't do something different from what requested
the retry, getting once again closer to real hardware behavior (where
what we use retries for is simply a bus operation, not involving
redundant decoding of instructions).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

It occurs to me that this caching is also needed in the shadow emulation path, but that is probably better done in a separate patch/series, rather than polluting this HVM series.
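
For the record, the stash-and-reuse idea boils down to something like the
self-contained sketch below (made-up names and a stubbed guest fetch
throughout, not the actual Xen interfaces):

#include <assert.h>
#include <string.h>

#define INSN_BUF_SIZE 16   /* x86 instructions are at most 15 bytes long */

struct insn_cache {
    unsigned int bytes;               /* 0 => cache empty */
    unsigned char buf[INSN_BUF_SIZE];
};

/* Stand-in for the real fetch from guest memory; always succeeds here. */
static int fetch_from_guest(unsigned char *buf, unsigned int len)
{
    memset(buf, 0x90, len);           /* pretend the guest executes NOPs */
    return 0;
}

/*
 * First pass: fetch from guest memory and stash the bytes.  Retry:
 * replay the stashed bytes, so the retried emulation cannot decode a
 * different instruction than the one which caused the retry.
 */
static int fetch_insn(struct insn_cache *cache,
                      unsigned char *buf, unsigned int len)
{
    assert(len <= INSN_BUF_SIZE);

    if ( cache->bytes )               /* retry: replay the stash */
    {
        memcpy(buf, cache->buf, cache->bytes);
        return cache->bytes;
    }

    if ( fetch_from_guest(buf, len) ) /* first pass: fetch ... */
        return -1;

    memcpy(cache->buf, buf, len);     /* ... and stash for a retry */
    cache->bytes = len;
    return len;
}

/* Any outcome other than a retry must invalidate the cache. */
static void emulation_done(struct insn_cache *cache, int retrying)
{
    if ( !retrying )
        cache->bytes = 0;
}

int main(void)
{
    struct insn_cache cache = { 0 };
    unsigned char buf[INSN_BUF_SIZE];

    fetch_insn(&cache, buf, 4);       /* first pass: fetches and stashes */
    emulation_done(&cache, 1);        /* retry requested: keep the stash */
    fetch_insn(&cache, buf, 4);       /* retry: replays the stashed bytes */
    emulation_done(&cache, 0);        /* done: invalidate */
    return 0;
}

The BUILD_BUG_ON() in the patch enforces the matching invariant, namely
that the per-vcpu stash is always large enough to hold a full insn_buf.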


--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -569,9 +569,19 @@ static int hvmemul_insn_fetch(
 
     /* Fall back if requested bytes are not in the prefetch cache. */
     if ( unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
-        return __hvmemul_read(
-            seg, offset, p_data, bytes,
-            hvm_access_insn_fetch, hvmemul_ctxt);
+    {
+        int rc = __hvmemul_read(seg, offset, p_data, bytes,
+                                hvm_access_insn_fetch, hvmemul_ctxt);
+
+        if ( rc == X86EMUL_OKAY )
+        {
+            ASSERT(insn_off + bytes <= sizeof(hvmemul_ctxt->insn_buf));
+            memcpy(&hvmemul_ctxt->insn_buf[insn_off], p_data, bytes);
+            hvmemul_ctxt->insn_buf_bytes = insn_off + bytes;
+        }
+
+        return rc;
+    }
 
     /* Hit the cache. Simple memcpy. */
     memcpy(p_data, &hvmemul_ctxt->insn_buf[insn_off], bytes);
@@ -1148,17 +1158,27 @@ int hvm_emulate_one(
         pfec |= PFEC_user_mode;
 
     hvmemul_ctxt->insn_buf_eip = regs->eip;
-    hvmemul_ctxt->insn_buf_bytes =
-        hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf)
-        ? :
-        (hvm_virtual_to_linear_addr(
-            x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
-            regs->eip, sizeof(hvmemul_ctxt->insn_buf),
-            hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
-         !hvm_fetch_from_guest_virt_nofault(
-             hvmemul_ctxt->insn_buf, addr,
-             sizeof(hvmemul_ctxt->insn_buf), pfec))
-        ? sizeof(hvmemul_ctxt->insn_buf) : 0;
+    if ( !vio->mmio_insn_bytes )
+    {
+        hvmemul_ctxt->insn_buf_bytes =
+            hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
+            (hvm_virtual_to_linear_addr(x86_seg_cs,
+                                        &hvmemul_ctxt->seg_reg[x86_seg_cs],
+                                        regs->eip,
+                                        sizeof(hvmemul_ctxt->insn_buf),
+                                        hvm_access_insn_fetch,
+                                        hvmemul_ctxt->ctxt.addr_size,
+                                        &addr) &&
+             hvm_fetch_from_guest_virt_nofault(hvmemul_ctxt->insn_buf, addr,
+                                               sizeof(hvmemul_ctxt->insn_buf),
+                                               pfec) == HVMCOPY_okay) ?
+            sizeof(hvmemul_ctxt->insn_buf) : 0;
+    }
+    else
+    {
+        hvmemul_ctxt->insn_buf_bytes = vio->mmio_insn_bytes;
+        memcpy(hvmemul_ctxt->insn_buf, vio->mmio_insn, vio->mmio_insn_bytes);
+    }
 
     hvmemul_ctxt->exn_pending = 0;
     vio->mmio_retrying = vio->mmio_retry;
@@ -1169,7 +1189,16 @@ int hvm_emulate_one(
     if ( rc == X86EMUL_OKAY && vio->mmio_retry )
         rc = X86EMUL_RETRY;
     if ( rc != X86EMUL_RETRY )
+    {
         vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+        vio->mmio_insn_bytes = 0;
+    }
+    else
+    {
+        BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
+        vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
+        memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
+    }
 
     if ( rc != X86EMUL_OKAY )
         return rc;
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -66,6 +66,9 @@ struct hvm_vcpu_io {
     /* We may write up to m256 as a number of device-model transactions. */
     unsigned int mmio_large_write_bytes;
     paddr_t mmio_large_write_pa;
+    /* For retries we shouldn't re-fetch the instruction. */
+    unsigned int mmio_insn_bytes;
+    unsigned char mmio_insn[16];
     /*
      * For string instruction emulation we need to be able to signal a
      * necessary retry through other than function return codes.



