From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: stefanha@redhat.com, famz@redhat.com, mst@redhat.com,
borntraeger@de.ibm.com
Subject: [Qemu-devel] [PATCH 01/11] exec: optimize remaining address_space_* cases
Date: Mon, 12 Dec 2016 12:18:47 +0100
Message-ID: <20161212111857.23399-2-pbonzini@redhat.com>
In-Reply-To: <20161212111857.23399-1-pbonzini@redhat.com>
Do them right, before the next patch generalizes them into a multi-included
file.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
exec.c | 126 +++++++++++++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 103 insertions(+), 23 deletions(-)
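The external contract of these accessors is unchanged: the caller still gets
the loaded value back and may pass a MemTxResult pointer to observe errors.
Internally they now take the RCU read lock themselves and only take the
iothread mutex when the access actually hits MMIO. A minimal, purely
illustrative caller (read_guest_byte() is a made-up wrapper; address_space_memory,
MEMTXATTRS_UNSPECIFIED and MEMTX_OK are the usual QEMU symbols):

    static uint8_t read_guest_byte(hwaddr addr)
    {
        MemTxResult res;
        uint32_t val;

        /* Fast path no longer goes through address_space_rw() for
         * the single-byte case. */
        val = address_space_ldub(&address_space_memory, addr,
                                 MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            /* MMIO or an unassigned region reported an error. */
            return 0xff;
        }
        return (uint8_t)val;
    }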
diff --git a/exec.c b/exec.c
index 08c558e..4db0ce5 100644
--- a/exec.c
+++ b/exec.c
@@ -3243,17 +3243,37 @@ uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
-/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs, MemTxResult *result)
{
- uint8_t val;
+ uint8_t *ptr;
+ uint64_t val;
+ MemoryRegion *mr;
+ hwaddr l = 1;
+ hwaddr addr1;
MemTxResult r;
+ bool release_lock = false;
- r = address_space_rw(as, addr, attrs, &val, 1, 0);
+ rcu_read_lock();
+ mr = address_space_translate(as, addr, &addr1, &l, false);
+ if (!memory_access_is_direct(mr, false)) {
+ release_lock |= prepare_mmio_access(mr);
+
+ /* I/O case */
+ r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
+ } else {
+ /* RAM case */
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ val = ldub_p(ptr);
+ r = MEMTX_OK;
+ }
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
+ rcu_read_unlock();
return val;
}
@@ -3493,17 +3513,35 @@ void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
-/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
MemTxAttrs attrs, MemTxResult *result)
{
- uint8_t v = val;
+ uint8_t *ptr;
+ MemoryRegion *mr;
+ hwaddr l = 1;
+ hwaddr addr1;
MemTxResult r;
+ bool release_lock = false;
- r = address_space_rw(as, addr, attrs, &v, 1, 1);
+ rcu_read_lock();
+ mr = address_space_translate(as, addr, &addr1, &l, true);
+ if (!memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+ r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
+ } else {
+ /* RAM case */
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ stb_p(ptr, val);
+ invalidate_and_set_dirty(mr, addr1, 1);
+ r = MEMTX_OK;
+ }
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
+ rcu_read_unlock();
}
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
@@ -3602,37 +3640,79 @@ void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
-/* XXX: optimize */
-void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result)
+static inline void address_space_stq_internal(AddressSpace *as,
+ hwaddr addr, uint64_t val,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
{
+ uint8_t *ptr;
+ MemoryRegion *mr;
+ hwaddr l = 8;
+ hwaddr addr1;
MemTxResult r;
- val = tswap64(val);
- r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
+ bool release_lock = false;
+
+ rcu_read_lock();
+ mr = address_space_translate(as, addr, &addr1, &l, true);
+ if (l < 8 || !memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap64(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap64(val);
+ }
+#endif
+ r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
+ } else {
+ /* RAM case */
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ stq_le_p(ptr, val);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ stq_be_p(ptr, val);
+ break;
+ default:
+ stq_p(ptr, val);
+ break;
+ }
+ invalidate_and_set_dirty(mr, addr1, 8);
+ r = MEMTX_OK;
+ }
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
+ rcu_read_unlock();
+}
+
+void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ address_space_stq_internal(as, addr, val, attrs, result,
+ DEVICE_NATIVE_ENDIAN);
}
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
MemTxAttrs attrs, MemTxResult *result)
{
- MemTxResult r;
- val = cpu_to_le64(val);
- r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
- if (result) {
- *result = r;
- }
+ address_space_stq_internal(as, addr, val, attrs, result,
+ DEVICE_LITTLE_ENDIAN);
}
+
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
MemTxAttrs attrs, MemTxResult *result)
{
- MemTxResult r;
- val = cpu_to_be64(val);
- r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
- if (result) {
- *result = r;
- }
+ address_space_stq_internal(as, addr, val, attrs, result,
+ DEVICE_BIG_ENDIAN);
}
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
--
1.8.3.1
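The "multi-included file" mentioned in the commit message is the usual C
trick of putting a function template into an .inc.c file and including it
several times with different macro definitions, which the next patch in the
series applies to these accessors. A rough, self-contained illustration of
the technique only (ldst_template.inc.c, SUFFIX, DATA_TYPE and glue are all
invented for this sketch and need not match what the series actually uses):

    /* ldst_template.inc.c (hypothetical): each inclusion must define
     * SUFFIX and DATA_TYPE before including this file. */
    static DATA_TYPE glue(load_, SUFFIX)(const uint8_t *p)
    {
        DATA_TYPE v;
        memcpy(&v, p, sizeof(v));   /* byte-wise load, any alignment */
        return v;
    }
    #undef SUFFIX
    #undef DATA_TYPE

    /* consumer.c (hypothetical): stamp out one accessor per width. */
    #include <stdint.h>
    #include <string.h>
    #define glue_(a, b) a##b
    #define glue(a, b)  glue_(a, b)

    #define SUFFIX    ub
    #define DATA_TYPE uint8_t
    #include "ldst_template.inc.c"      /* defines load_ub() */

    #define SUFFIX    q
    #define DATA_TYPE uint64_t
    #include "ldst_template.inc.c"      /* defines load_q() */

Each inclusion stamps out one concrete function, so a single fast-path body
can serve every access width without duplicating the code.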