From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xen.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2] Fix ioreq-server event channel vulnerability
Date: Thu, 24 Jul 2014 10:18:34 +0100 [thread overview]
Message-ID: <1406193514-9008-1-git-send-email-paul.durrant@citrix.com> (raw)
The code in hvm_send_assist_req_to_ioreq_server() and hvm_do_resume() uses
an event channel port number taken from the page of memory shared with the
emulator. This allows an emulator to corrupt values that are then blindly
used by Xen, leading to assertion failures in some cases. Moreover, in the
case of the default ioreq server the page remains in the guest p2m so a
malicious guest could similarly corrupt those values.
This patch changes the aforementioned functions to get the event channel
port number from an internal structure and also adds an extra check to
hvm_send_assist_req_to_ioreq_server() which will crash the domain should the
guest or an emulator corrupt the port number in the shared page.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reported-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
v2:
- hvm_wait_for_io() now returns a bool_t as suggested by Andrew Cooper
xen/arch/x86/hvm/hvm.c | 113 +++++++++++++++++++++++++++++++++---------------
1 file changed, 77 insertions(+), 36 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ef2411c..a573c59 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -392,6 +392,33 @@ bool_t hvm_io_pending(struct vcpu *v)
return 0;
}
+static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
+{
+ /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
+ while ( p->state != STATE_IOREQ_NONE )
+ {
+ switch ( p->state )
+ {
+ case STATE_IORESP_READY: /* IORESP_READY -> NONE */
+ rmb(); /* see IORESP_READY /then/ read contents of ioreq */
+ hvm_io_assist(p);
+ break;
+ case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
+ case STATE_IOREQ_INPROCESS:
+ wait_on_xen_event_channel(sv->ioreq_evtchn,
+ (p->state != STATE_IOREQ_READY) &&
+ (p->state != STATE_IOREQ_INPROCESS));
+ break;
+ default:
+ gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+ domain_crash(sv->vcpu->domain);
+ return 0; /* bail */
+ }
+ }
+
+ return 1;
+}
+
void hvm_do_resume(struct vcpu *v)
{
struct domain *d = v->domain;
@@ -406,27 +433,18 @@ void hvm_do_resume(struct vcpu *v)
&d->arch.hvm_domain.ioreq_server.list,
list_entry )
{
- ioreq_t *p = get_ioreq(s, v);
+ struct hvm_ioreq_vcpu *sv;
- /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
- while ( p->state != STATE_IOREQ_NONE )
+ list_for_each_entry ( sv,
+ &s->ioreq_vcpu_list,
+ list_entry )
{
- switch ( p->state )
+ if ( sv->vcpu == v )
{
- case STATE_IORESP_READY: /* IORESP_READY -> NONE */
- rmb(); /* see IORESP_READY /then/ read contents of ioreq */
- hvm_io_assist(p);
- break;
- case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
- case STATE_IOREQ_INPROCESS:
- wait_on_xen_event_channel(p->vp_eport,
- (p->state != STATE_IOREQ_READY) &&
- (p->state != STATE_IOREQ_INPROCESS));
+ if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
+ return;
+
break;
- default:
- gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
- domain_crash(d);
- return; /* bail */
}
}
}
@@ -2545,35 +2563,58 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
- ioreq_t *p;
+ struct hvm_ioreq_vcpu *sv;
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
return 0; /* implicitly bins the i/o operation */
- p = get_ioreq(s, curr);
-
- if ( unlikely(p->state != STATE_IOREQ_NONE) )
+ list_for_each_entry ( sv,
+ &s->ioreq_vcpu_list,
+ list_entry )
{
- /* This indicates a bug in the device model. Crash the domain. */
- gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
- domain_crash(d);
- return 0;
- }
+ if ( sv->vcpu == curr )
+ {
+ evtchn_port_t port = sv->ioreq_evtchn;
+ ioreq_t *p = get_ioreq(s, curr);
+
+ if ( unlikely(p->state != STATE_IOREQ_NONE) )
+ {
+ gdprintk(XENLOG_ERR,
+ "Device model set bad IO state %d.\n",
+ p->state);
+ goto crash;
+ }
+
+ if ( unlikely(p->vp_eport != port) )
+ {
+ gdprintk(XENLOG_ERR,
+ "Device model set bad event channel %d.\n",
+ p->vp_eport);
+ goto crash;
+ }
- proto_p->state = STATE_IOREQ_NONE;
- proto_p->vp_eport = p->vp_eport;
- *p = *proto_p;
+ proto_p->state = STATE_IOREQ_NONE;
+ proto_p->vp_eport = port;
+ *p = *proto_p;
- prepare_wait_on_xen_event_channel(p->vp_eport);
+ prepare_wait_on_xen_event_channel(port);
- /*
- * Following happens /after/ blocking and setting up ioreq contents.
- * prepare_wait_on_xen_event_channel() is an implicit barrier.
- */
- p->state = STATE_IOREQ_READY;
- notify_via_xen_event_channel(d, p->vp_eport);
+ /*
+ * Following happens /after/ blocking and setting up ioreq
+ * contents. prepare_wait_on_xen_event_channel() is an implicit
+ * barrier.
+ */
+ p->state = STATE_IOREQ_READY;
+ notify_via_xen_event_channel(d, port);
+ break;
+ }
+ }
return 1;
+
+ crash:
+ domain_crash(d);
+ return 0;
}
bool_t hvm_send_assist_req(ioreq_t *p)
--
1.7.10.4
reply other threads:[~2014-07-24 9:18 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1406193514-9008-1-git-send-email-paul.durrant@citrix.com \
--to=paul.durrant@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=jbeulich@suse.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).