From: Jonathan Cameron via <qemu-devel@nongnu.org>
To: "Paolo Bonzini" <pbonzini@redhat.com>,
"Peter Xu" <peterx@redhat.com>,
"David Hildenbrand" <david@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
qemu-devel@nongnu.org
Cc: <linuxarm@huawei.com>
Subject: [PATCH 2/3] physmem: Factor out body of flatview_read/write_continue() loop
Date: Thu, 15 Feb 2024 14:28:16 +0000
Message-ID: <20240215142817.1904-3-Jonathan.Cameron@huawei.com>
In-Reply-To: <20240215142817.1904-1-Jonathan.Cameron@huawei.com>

This code will shortly be reused by the address_space_cached accessors.

Also reduce the scope of the result variable now that the MMIO dispatch is
no longer called directly in the loop.

Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
---
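As an aside for reviewers (kept below the '---' so it stays out of the
commit message): a rough sketch of how I expect the new step helper to be
driven by a caller that already has the MemoryRegion in hand, such as the
address_space_*_cached_slow() rework in patch 3/3.  The function name and
exact parameter list below are illustrative only and do not appear in this
series; the read side would look the same with flatview_read_continue_step().

/*
 * Illustrative sketch only -- not part of this patch.  A caller that
 * already knows the target MemoryRegion (so no re-translation per
 * iteration) could drive the helper like this:
 */
static MemTxResult example_write_continue(hwaddr addr, MemTxAttrs attrs,
                                          const void *ptr, hwaddr len,
                                          hwaddr mr_addr, hwaddr l,
                                          MemoryRegion *mr)
{
    MemTxResult result = MEMTX_OK;
    const uint8_t *buf = ptr;

    for (;;) {
        /*
         * One step is either a single MMIO dispatch or a direct RAM copy;
         * the helper clamps l to however much it actually handled.
         */
        result |= flatview_write_continue_step(addr, attrs, buf, len,
                                               mr_addr, &l, mr);
        len -= l;
        buf += l;
        addr += l;
        mr_addr += l;

        if (!len) {
            break;
        }
        l = len;
    }

    return result;
}

The point of the split is that the step only ever handles one chunk and
reports the consumed length back through *l, so the caller keeps ownership
of the advance / re-translate logic.
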
system/physmem.c | 165 ++++++++++++++++++++++++++++-------------------
1 file changed, 98 insertions(+), 67 deletions(-)
diff --git a/system/physmem.c b/system/physmem.c
index 39b5ac751e..74f92bb3b8 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -2677,6 +2677,54 @@ static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
return false;
}
+static MemTxResult flatview_write_continue_step(hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf,
+ hwaddr len, hwaddr addr1,
+ hwaddr *l, MemoryRegion *mr)
+{
+ if (!flatview_access_allowed(mr, attrs, addr1, *l)) {
+ return MEMTX_ACCESS_ERROR;
+ }
+
+ if (!memory_access_is_direct(mr, true)) {
+ uint64_t val;
+ MemTxResult result;
+ bool release_lock = prepare_mmio_access(mr);
+
+ *l = memory_access_size(mr, *l, addr1);
+ /* XXX: could force current_cpu to NULL to avoid
+ potential bugs */
+
+ /*
+ * Assure Coverity (and ourselves) that we are not going to OVERRUN
+ * the buffer by following ldn_he_p().
+ */
+#ifdef QEMU_STATIC_ANALYSIS
+ assert((*l == 1 && len >= 1) ||
+ (*l == 2 && len >= 2) ||
+ (*l == 4 && len >= 4) ||
+ (*l == 8 && len >= 8));
+#endif
+ val = ldn_he_p(buf, *l);
+ result = memory_region_dispatch_write(mr, addr1, val,
+ size_memop(*l), attrs);
+ if (release_lock) {
+ bql_unlock();
+ }
+
+ return result;
+ } else {
+ /* RAM case */
+ uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, l, false);
+
+ memmove(ram_ptr, buf, *l);
+ invalidate_and_set_dirty(mr, addr1, *l);
+
+ return MEMTX_OK;
+ }
+}
+
/* Called within RCU critical section. */
static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
MemTxAttrs attrs,
@@ -2688,42 +2736,9 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
const uint8_t *buf = ptr;
for (;;) {
- if (!flatview_access_allowed(mr, attrs, addr1, l)) {
- result |= MEMTX_ACCESS_ERROR;
- /* Keep going. */
- } else if (!memory_access_is_direct(mr, true)) {
- uint64_t val;
- bool release_lock = prepare_mmio_access(mr);
-
- l = memory_access_size(mr, l, addr1);
- /* XXX: could force current_cpu to NULL to avoid
- potential bugs */
-
- /*
- * Assure Coverity (and ourselves) that we are not going to OVERRUN
- * the buffer by following ldn_he_p().
- */
-#ifdef QEMU_STATIC_ANALYSIS
- assert((l == 1 && len >= 1) ||
- (l == 2 && len >= 2) ||
- (l == 4 && len >= 4) ||
- (l == 8 && len >= 8));
-#endif
- val = ldn_he_p(buf, l);
- result |= memory_region_dispatch_write(mr, addr1, val,
- size_memop(l), attrs);
- if (release_lock) {
- bql_unlock();
- }
-
- } else {
- /* RAM case */
- uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l,
- false);
- memmove(ram_ptr, buf, l);
- invalidate_and_set_dirty(mr, addr1, l);
- }
+ result |= flatview_write_continue_step(addr, attrs, buf, len, addr1, &l,
+ mr);
len -= l;
buf += l;
@@ -2757,6 +2772,52 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
addr1, l, mr);
}
+static MemTxResult flatview_read_continue_step(hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ hwaddr len, hwaddr addr1,
+ hwaddr *l,
+ MemoryRegion *mr)
+{
+ if (!flatview_access_allowed(mr, attrs, addr1, *l)) {
+ return MEMTX_ACCESS_ERROR;
+ }
+
+ if (!memory_access_is_direct(mr, false)) {
+ /* I/O case */
+ uint64_t val;
+ MemTxResult result;
+ bool release_lock = prepare_mmio_access(mr);
+
+ *l = memory_access_size(mr, *l, addr1);
+ result = memory_region_dispatch_read(mr, addr1, &val,
+ size_memop(*l), attrs);
+
+ /*
+ * Assure Coverity (and ourselves) that we are not going to OVERRUN
+ * the buffer by following stn_he_p().
+ */
+#ifdef QEMU_STATIC_ANALYSIS
+ assert((*l == 1 && len >= 1) ||
+ (*l == 2 && len >= 2) ||
+ (*l == 4 && len >= 4) ||
+ (*l == 8 && len >= 8));
+#endif
+ stn_he_p(buf, *l, val);
+
+ if (release_lock) {
+ bql_unlock();
+ }
+ return result;
+ } else {
+ /* RAM case */
+ uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, l, false);
+
+ memcpy(buf, ram_ptr, *l);
+
+ return MEMTX_OK;
+ }
+}
+
/* Called within RCU critical section. */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
MemTxAttrs attrs, void *ptr,
@@ -2768,38 +2829,8 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
fuzz_dma_read_cb(addr, len, mr);
for (;;) {
- if (!flatview_access_allowed(mr, attrs, addr1, l)) {
- result |= MEMTX_ACCESS_ERROR;
- /* Keep going. */
- } else if (!memory_access_is_direct(mr, false)) {
- /* I/O case */
- uint64_t val;
- bool release_lock = prepare_mmio_access(mr);
-
- l = memory_access_size(mr, l, addr1);
- result |= memory_region_dispatch_read(mr, addr1, &val,
- size_memop(l), attrs);
-
- /*
- * Assure Coverity (and ourselves) that we are not going to OVERRUN
- * the buffer by following stn_he_p().
- */
-#ifdef QEMU_STATIC_ANALYSIS
- assert((l == 1 && len >= 1) ||
- (l == 2 && len >= 2) ||
- (l == 4 && len >= 4) ||
- (l == 8 && len >= 8));
-#endif
- stn_he_p(buf, l, val);
- if (release_lock) {
- bql_unlock();
- }
- } else {
- /* RAM case */
- uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l,
- false);
- memcpy(buf, ram_ptr, l);
- }
+ result |= flatview_read_continue_step(addr, attrs, buf,
+ len, addr1, &l, mr);
len -= l;
buf += l;
--
2.39.2