From: Andrew Cooper
Subject: Re: [PATCH 3/5] x86/HVM: don't ignore hvm_copy_to_guest_phys() errors during I/O intercept
Date: Tue, 8 Oct 2013 19:20:03 +0100
Message-ID: <52544CD3.5010709@citrix.com>
In-Reply-To: <5249918B02000078000F807F@nat28.tlf.novell.com>
References: <52498FFC02000078000F8068@nat28.tlf.novell.com> <5249918B02000078000F807F@nat28.tlf.novell.com>
To: Jan Beulich
Cc: xen-devel, Keir Fraser
List-Id: xen-devel@lists.xenproject.org

On 30/09/13 13:58, Jan Beulich wrote:
Building upon the extended retry logic we can now also make sure to
not ignore errors resulting from writing data back to guest memory.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -47,6 +47,7 @@ static int hvm_mmio_access(struct vcpu *
                            hvm_mmio_read_t read_handler,
                            hvm_mmio_write_t write_handler)
 {
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;

A struct vcpu is passed into this function, and can be used in preference to current.  (It is also possibly fractionally faster, unless the compiler can prove that v is always current.)
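
For illustration, the suggested form would be (a sketch, assuming v is the struct vcpu * parameter already passed to hvm_mmio_access):

    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;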

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

     unsigned long data;
     int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
 
@@ -54,7 +55,16 @@ static int hvm_mmio_access(struct vcpu *
     {
         if ( p->dir == IOREQ_READ )
         {
-            rc = read_handler(v, p->addr, p->size, &data);
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
+                rc = read_handler(v, p->addr, p->size, &data);
             p->data = data;
         }
         else /* p->dir == IOREQ_WRITE */
@@ -66,18 +76,48 @@ static int hvm_mmio_access(struct vcpu *
     {
         for ( i = 0; i < p->count; i++ )
         {
-            int ret;
-
-            rc = read_handler(v, p->addr + step * i, p->size, &data);
-            if ( rc != X86EMUL_OKAY )
-                break;
-            ret = hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) || 
-                 (ret == HVMCOPY_gfn_shared) )
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
             {
+                rc = read_handler(v, p->addr + step * i, p->size, &data);
+                if ( rc != X86EMUL_OKAY )
+                    break;
+            }
+            switch ( hvm_copy_to_guest_phys(p->data + step * i,
+                                            &data, p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 rc = X86EMUL_RETRY;
                 break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                /* Drop the write as real hardware would. */
+                continue;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
             }
+            if ( rc != X86EMUL_OKAY )
+                break;
+        }
+
+        if ( rc == X86EMUL_RETRY )
+        {
+            vio->mmio_retry = 1;
+            vio->mmio_large_read_bytes = p->size;
+            memcpy(vio->mmio_large_read, &data, p->size);
         }
     }
     else
@@ -109,6 +149,9 @@ static int hvm_mmio_access(struct vcpu *
             if ( rc != X86EMUL_OKAY )
                 break;
         }
+
+        if ( rc == X86EMUL_RETRY )
+            vio->mmio_retry = 1;
     }
 
     if ( i != 0 )
@@ -137,6 +180,7 @@ int hvm_mmio_intercept(ioreq_t *p)
 
 static int process_portio_intercept(portio_action_t action, ioreq_t *p)
 {
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
     uint32_t data;
 
@@ -144,7 +188,16 @@ static int process_portio_intercept(port
     {
         if ( p->dir == IOREQ_READ )
         {
-            rc = action(IOREQ_READ, p->addr, p->size, &data);
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
+                rc = action(IOREQ_READ, p->addr, p->size, &data);
             p->data = data;
         }
         else
@@ -159,10 +212,48 @@ static int process_portio_intercept(port
     {
         for ( i = 0; i < p->count; i++ )
         {
-            rc = action(IOREQ_READ, p->addr, p->size, &data);
-            if ( rc != X86EMUL_OKAY )
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
+            {
+                rc = action(IOREQ_READ, p->addr, p->size, &data);
+                if ( rc != X86EMUL_OKAY )
+                    break;
+            }
+            switch ( hvm_copy_to_guest_phys(p->data + step * i,
+                                            &data, p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
+                rc = X86EMUL_RETRY;
                 break;
-            (void)hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
+            case HVMCOPY_bad_gfn_to_mfn:
+                /* Drop the write as real hardware would. */
+                continue;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
+            }
+            if ( rc != X86EMUL_OKAY )
+                break;
+        }
+
+        if ( rc == X86EMUL_RETRY )
+        {
+            vio->mmio_retry = 1;
+            vio->mmio_large_read_bytes = p->size;
+            memcpy(vio->mmio_large_read, &data, p->size);
         }
     }
     else /* p->dir == IOREQ_WRITE */
@@ -195,6 +286,9 @@ static int process_portio_intercept(port
             if ( rc != X86EMUL_OKAY )
                 break;
         }
+
+        if ( rc == X86EMUL_RETRY )
+            vio->mmio_retry = 1;
     }
 
     if ( i != 0 )



