From: Ian Jackson <ian.jackson@eu.citrix.com>
To: qemu-devel@nongnu.org, xen-devel@lists.xensource.com
Cc: Dongxiao Xu <dongxiao.xu@intel.com>,
Ian Jackson <ian.jackson@eu.citrix.com>,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [Qemu-devel] [PATCH 1/2] cpu_ioreq_pio, cpu_ioreq_move: introduce read_phys_req_item, write_phys_req_item
Date: Mon, 10 Dec 2012 18:04:22 +0000
Message-ID: <1355162663-26956-2-git-send-email-ian.jackson@eu.citrix.com>
In-Reply-To: <1355162663-26956-1-git-send-email-ian.jackson@eu.citrix.com>
Replace a lot of formulaic multiplications (containing casts, no less)
with calls to a pair of functions. This encapsulates in a single
place the operations which require care relating to integer overflow.
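
To make the shape of the change concrete, here is one call site from the
diff below, before and after (illustration only; both fragments appear in
the patch itself):

    /* before: per-call-site offset arithmetic, with a cast and a sign flag */
    cpu_physical_memory_write(
        req->data + (sign * i * (int64_t)req->size),
        (uint8_t *) &tmp, req->size);

    /* after: the arithmetic is done once, inside rw_phys_req_item(),
     * entirely in unsigned hwaddr arithmetic */
    write_phys_req_item(req->data, req, i, &tmp);

As the helper's comment notes, doing everything unsigned means overflow
just produces a truncated result and accesses to parts of guest memory the
guest did not intend, which is the guest's own problem.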
Cc: Dongxiao Xu <dongxiao.xu@intel.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
---
xen-all.c | 73 ++++++++++++++++++++++++++++++++++++-------------------------
1 files changed, 43 insertions(+), 30 deletions(-)
diff --git a/xen-all.c b/xen-all.c
index 046cc2a..97c8ef4 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -682,11 +682,42 @@ static void do_outp(pio_addr_t addr,
}
}
-static void cpu_ioreq_pio(ioreq_t *req)
+/*
+ * Helper functions which read/write an object from/to physical guest
+ * memory, as part of the implementation of an ioreq.
+ *
+ * Equivalent to
+ *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
+ *                          val, req->size, 0/1)
+ * except without the integer overflow problems.
+ */
+static void rw_phys_req_item(hwaddr addr,
+                             ioreq_t *req, uint32_t i, void *val, int rw)
+{
+    /* Do everything unsigned so overflow just results in a truncated result
+     * and accesses to undesired parts of guest memory, which is up
+     * to the guest */
+    hwaddr offset = (hwaddr)req->size * i;
+    if (req->df) addr -= offset;
+    else addr += offset;
+    cpu_physical_memory_rw(addr, val, req->size, rw);
+}
+
+static inline void read_phys_req_item(hwaddr addr,
+                                      ioreq_t *req, uint32_t i, void *val)
+{
+    rw_phys_req_item(addr, req, i, val, 0);
+}
+static inline void write_phys_req_item(hwaddr addr,
+                                       ioreq_t *req, uint32_t i, void *val)
 {
-    int i, sign;
+    rw_phys_req_item(addr, req, i, val, 1);
+}
 
-    sign = req->df ? -1 : 1;
+
+static void cpu_ioreq_pio(ioreq_t *req)
+{
+    int i;
 
if (req->dir == IOREQ_READ) {
if (!req->data_is_ptr) {
@@ -696,9 +727,7 @@ static void cpu_ioreq_pio(ioreq_t *req)
for (i = 0; i < req->count; i++) {
tmp = do_inp(req->addr, req->size);
- cpu_physical_memory_write(
- req->data + (sign * i * (int64_t)req->size),
- (uint8_t *) &tmp, req->size);
+ write_phys_req_item(req->data, req, i, &tmp);
}
}
} else if (req->dir == IOREQ_WRITE) {
@@ -708,9 +737,7 @@ static void cpu_ioreq_pio(ioreq_t *req)
for (i = 0; i < req->count; i++) {
uint32_t tmp = 0;
- cpu_physical_memory_read(
- req->data + (sign * i * (int64_t)req->size),
- (uint8_t*) &tmp, req->size);
+ read_phys_req_item(req->data, req, i, &tmp);
do_outp(req->addr, req->size, tmp);
}
}
@@ -719,22 +746,16 @@ static void cpu_ioreq_pio(ioreq_t *req)
static void cpu_ioreq_move(ioreq_t *req)
{
- int i, sign;
-
- sign = req->df ? -1 : 1;
+ int i;
if (!req->data_is_ptr) {
if (req->dir == IOREQ_READ) {
for (i = 0; i < req->count; i++) {
- cpu_physical_memory_read(
- req->addr + (sign * i * (int64_t)req->size),
- (uint8_t *) &req->data, req->size);
+ read_phys_req_item(req->addr, req, i, &req->data);
}
} else if (req->dir == IOREQ_WRITE) {
for (i = 0; i < req->count; i++) {
- cpu_physical_memory_write(
- req->addr + (sign * i * (int64_t)req->size),
- (uint8_t *) &req->data, req->size);
+ write_phys_req_item(req->addr, req, i, &req->data);
}
}
} else {
@@ -742,21 +763,13 @@ static void cpu_ioreq_move(ioreq_t *req)
if (req->dir == IOREQ_READ) {
for (i = 0; i < req->count; i++) {
- cpu_physical_memory_read(
- req->addr + (sign * i * (int64_t)req->size),
- (uint8_t*) &tmp, req->size);
- cpu_physical_memory_write(
- req->data + (sign * i * (int64_t)req->size),
- (uint8_t*) &tmp, req->size);
+ read_phys_req_item(req->addr, req, i, &tmp);
+ write_phys_req_item(req->data, req, i, &tmp);
}
} else if (req->dir == IOREQ_WRITE) {
for (i = 0; i < req->count; i++) {
- cpu_physical_memory_read(
- req->data + (sign * i * (int64_t)req->size),
- (uint8_t*) &tmp, req->size);
- cpu_physical_memory_write(
- req->addr + (sign * i * (int64_t)req->size),
- (uint8_t*) &tmp, req->size);
+ read_phys_req_item(req->data, req, i, &tmp);
+ write_phys_req_item(req->addr, req, i, &tmp);
}
}
}
--
1.7.2.5
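
As an illustration of the arithmetic the new helper relies on, here is a
self-contained sketch; the names hwaddr_t, item_addr, req_size, req_df and
the example values are invented for this sketch, and only the unsigned
offset computation mirrors rw_phys_req_item():

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hwaddr_t;   /* stand-in for QEMU's hwaddr */

    /* Compute addr +/- size*i the way rw_phys_req_item() does: entirely in
     * unsigned arithmetic, so a huge guest-supplied size or index wraps to
     * some (possibly nonsensical) guest-physical address instead of
     * producing signed overflow. */
    static hwaddr_t item_addr(hwaddr_t addr, uint32_t req_size,
                              uint32_t i, int req_df)
    {
        hwaddr_t offset = (hwaddr_t)req_size * i;
        return req_df ? addr - offset : addr + offset;
    }

    int main(void)
    {
        /* A guest-controlled request: large element size, large index. */
        uint32_t req_size = 0x10000000u;   /* 256 MiB per element */
        uint32_t i = 0x80;                 /* 128th element */
        hwaddr_t base = 0x1000;

        printf("ascending:  0x%" PRIx64 "\n",
               (uint64_t)item_addr(base, req_size, i, 0));
        printf("descending: 0x%" PRIx64 "\n",
               (uint64_t)item_addr(base, req_size, i, 1));
        return 0;
    }

The descending case wraps around zero, which is exactly the "truncated
result, up to the guest" behaviour the patch's comment describes.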
Thread overview: 4+ messages
2012-12-10 18:04 [Qemu-devel] [PATCH 0/2] cpu_ioreq_pio, cpu_ioreq_move: simplify and fix Ian Jackson
2012-12-10 18:04 ` Ian Jackson [this message]
2012-12-10 19:29   ` [Qemu-devel] [PATCH 1/2] cpu_ioreq_pio, cpu_ioreq_move: introduce read_phys_req_item, write_phys_req_item Stefano Stabellini
2012-12-10 18:04 ` [Qemu-devel] [PATCH 2/2] cpu_ioreq_pio, cpu_ioreq_move: i should be uint32_t rather than int Ian Jackson