From: Avi Kivity <avi@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH 03/16] memory: move mmio access to functions
Date: Mon, 2 Jan 2012 18:33:22 +0200
Message-ID: <1325522015-503-4-git-send-email-avi@redhat.com>
In-Reply-To: <1325522015-503-1-git-send-email-avi@redhat.com>
Currently, MMIO accesses go directly to the io_mem_{read,write} arrays.
In preparation for eliminating them, add indirection via wrapper functions.
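To make the new calling convention concrete, here is a minimal standalone
sketch (simplified, hypothetical names and types; not the actual QEMU code)
of dispatching through a per-width callback table: the wrapper takes the
access size in bytes and maps it to the table slot with a count-trailing-zeros
operation, a stand-in for the bitops_ffsl(size) used by the real wrappers
added to memory.c below.

  /* Illustrative sketch only: a per-region table holds one handler per
   * access width, and the wrapper turns a byte size (1, 2 or 4) into the
   * slot index, much like io_mem_read() does against _io_mem_read[][]. */
  #include <stdint.h>
  #include <stdio.h>
  #include <inttypes.h>

  typedef uint32_t (*mmio_read_fn)(void *opaque, uint64_t addr);

  static uint32_t dummy_readb(void *opaque, uint64_t addr) { (void)opaque; (void)addr; return 0xab; }
  static uint32_t dummy_readw(void *opaque, uint64_t addr) { (void)opaque; (void)addr; return 0xabcd; }
  static uint32_t dummy_readl(void *opaque, uint64_t addr) { (void)opaque; (void)addr; return 0xabcdef01; }

  /* One callback per access width: slot 0 = 1-byte, 1 = 2-byte, 2 = 4-byte. */
  static mmio_read_fn read_table[1][4] = { { dummy_readb, dummy_readw, dummy_readl } };
  static void *opaque_table[1];

  static uint64_t mmio_read(int index, uint64_t addr, unsigned size)
  {
      unsigned slot = __builtin_ctz(size);   /* maps size 1/2/4 to slot 0/1/2 */
      return read_table[index][slot](opaque_table[index], addr);
  }

  int main(void)
  {
      /* A 4-byte access dispatches to the 32-bit handler and prints 0xabcdef01. */
      printf("0x%" PRIx64 "\n", mmio_read(0, 0x10, 4));
      return 0;
  }

The wrappers introduced in memory.c by this patch do the same mapping against
the renamed _io_mem_read/_io_mem_write tables and pass the per-region
io_mem_opaque pointer on behalf of the caller, so call sites no longer need
to know about either table.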
Signed-off-by: Avi Kivity <avi@redhat.com>
---
exec-all.h | 7 ++++-
exec.c | 54 ++++++++++++++++++++++++++--------------------------
memory.c | 13 ++++++++++++
softmmu_template.h | 20 +++++++++---------
4 files changed, 55 insertions(+), 39 deletions(-)
diff --git a/exec-all.h b/exec-all.h
index c211242..23c4598 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -299,8 +299,11 @@ extern void *tci_tb_ptr;
#if !defined(CONFIG_USER_ONLY)
-extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
-extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+uint64_t io_mem_read(int index, target_phys_addr_t addr, unsigned size);
+void io_mem_write(int index, target_phys_addr_t addr, uint64_t value,
+ unsigned size);
+extern CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
+extern CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
diff --git a/exec.c b/exec.c
index d21413a..8a3f621 100644
--- a/exec.c
+++ b/exec.c
@@ -205,8 +205,8 @@
static void memory_map_init(void);
/* io memory support */
-CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
-CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
+CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
@@ -3350,7 +3350,7 @@ static inline uint32_t subpage_readlen (subpage_t *mmio,
addr += mmio->region_offset[idx];
idx = mmio->sub_io_index[idx];
- return io_mem_read[idx][len](io_mem_opaque[idx], addr);
+ return io_mem_read(idx, addr, 1 << len);
}
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
@@ -3364,7 +3364,7 @@ static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
addr += mmio->region_offset[idx];
idx = mmio->sub_io_index[idx];
- io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
+ io_mem_write(idx, addr, value, 1 << len);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
@@ -3553,11 +3553,11 @@ static int cpu_register_io_memory_fixed(int io_index,
}
for (i = 0; i < 3; ++i) {
- io_mem_read[io_index][i]
+ _io_mem_read[io_index][i]
= (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
}
for (i = 0; i < 3; ++i) {
- io_mem_write[io_index][i]
+ _io_mem_write[io_index][i]
= (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
}
io_mem_opaque[io_index] = opaque;
@@ -3578,8 +3578,8 @@ void cpu_unregister_io_memory(int io_table_address)
int io_index = io_table_address >> IO_MEM_SHIFT;
for (i=0;i < 3; i++) {
- io_mem_read[io_index][i] = unassigned_mem_read[i];
- io_mem_write[io_index][i] = unassigned_mem_write[i];
+ _io_mem_read[io_index][i] = unassigned_mem_read[i];
+ _io_mem_write[io_index][i] = unassigned_mem_write[i];
}
io_mem_opaque[io_index] = NULL;
io_mem_used[io_index] = 0;
@@ -3697,17 +3697,17 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit write access */
val = ldl_p(buf);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
+ io_mem_write(io_index, addr1, val, 4);
l = 4;
} else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit write access */
val = lduw_p(buf);
- io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
+ io_mem_write(io_index, addr1, val, 2);
l = 2;
} else {
/* 8 bit write access */
val = ldub_p(buf);
- io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
+ io_mem_write(io_index, addr1, val, 1);
l = 1;
}
} else {
@@ -3734,17 +3734,17 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit read access */
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
+ val = io_mem_read(io_index, addr1, 4);
stl_p(buf, val);
l = 4;
} else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit read access */
- val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
+ val = io_mem_read(io_index, addr1, 2);
stw_p(buf, val);
l = 2;
} else {
/* 8 bit read access */
- val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
+ val = io_mem_read(io_index, addr1, 1);
stb_p(buf, val);
l = 1;
}
@@ -3957,7 +3957,7 @@ static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
/* I/O case */
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
@@ -4023,11 +4023,11 @@ static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
/* XXX This is broken when device endian != cpu endian.
Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
- val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
- val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
+ val = io_mem_read(io_index, addr, 4) << 32;
+ val |= io_mem_read(io_index, addr + 4, 4);
#else
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
- val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
+ val = io_mem_read(io_index, addr, 4);
+ val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
} else {
/* RAM case */
@@ -4089,7 +4089,7 @@ static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
/* I/O case */
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
- val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+ val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
@@ -4149,7 +4149,7 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ io_mem_write(io_index, addr, val, 4);
} else {
unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
ptr = qemu_get_ram_ptr(addr1);
@@ -4181,11 +4181,11 @@ void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
+ io_mem_write(io_index, addr, val >> 32, 4);
+ io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
+ io_mem_write(io_index, addr, (uint32_t)val, 4);
+ io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
} else {
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
@@ -4218,7 +4218,7 @@ static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
val = bswap32(val);
}
#endif
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ io_mem_write(io_index, addr, val, 4);
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
@@ -4291,7 +4291,7 @@ static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
val = bswap16(val);
}
#endif
- io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+ io_mem_write(io_index, addr, val, 2);
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
diff --git a/memory.c b/memory.c
index 6f9fea1..5c46833 100644
--- a/memory.c
+++ b/memory.c
@@ -1580,6 +1580,19 @@ void set_system_io_map(MemoryRegion *mr)
memory_region_update_topology(NULL);
}
+uint64_t io_mem_read(int io_index, target_phys_addr_t addr, unsigned size)
+{
+ return _io_mem_read[io_index][bitops_ffsl(size)](io_mem_opaque[io_index],
+ addr);
+}
+
+void io_mem_write(int io_index, target_phys_addr_t addr,
+ uint64_t val, unsigned size)
+{
+ _io_mem_write[io_index][bitops_ffsl(size)](io_mem_opaque[io_index],
+ addr, val);
+}
+
typedef struct MemoryRegionList MemoryRegionList;
struct MemoryRegionList {
diff --git a/softmmu_template.h b/softmmu_template.h
index 36eb2e8..a030b5f 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -72,14 +72,14 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
env->mem_io_vaddr = addr;
#if SHIFT <= 2
- res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
+ res = io_mem_read(index, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
- res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
- res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
+ res = io_mem_read(index, physaddr, 4) << 32;
+ res |= io_mem_read(index, physaddr + 4, 4);
#else
- res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
- res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
+ res = io_mem_read(index, physaddr, 4);
+ res |= io_mem_read(index, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
return res;
@@ -215,14 +215,14 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
env->mem_io_vaddr = addr;
env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
- io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
+ io_mem_write(index, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
- io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
- io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
+ io_mem_write(index, physaddr, (val >> 32), 4);
+ io_mem_write(index, physaddr + 4, (uint32_t)val, 4);
#else
- io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
- io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
+ io_mem_write(index, physaddr, (uint32_t)val, 4);
+ io_mem_write(index, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}
--
1.7.7.1
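For reference, the SHIFT > 2 paths in softmmu_template.h and the ldq/stq
helpers above split each 64-bit access into two 32-bit calls through the new
wrappers, choosing which half sits at the lower address based on
TARGET_WORDS_BIGENDIAN. A minimal sketch of that composition, with
mmio_read32() as a hypothetical stand-in for io_mem_read(..., 4):

  /* Illustrative sketch of composing a 64-bit MMIO read from two 32-bit
   * reads; the placement of each half depends on the target's endianness. */
  #include <stdint.h>

  extern uint32_t mmio_read32(uint64_t addr);   /* stand-in, defined elsewhere */

  uint64_t mmio_read64(uint64_t addr, int target_big_endian)
  {
      uint64_t val;

      if (target_big_endian) {
          /* High word lives at the lower address. */
          val  = (uint64_t)mmio_read32(addr) << 32;
          val |= mmio_read32(addr + 4);
      } else {
          /* Low word lives at the lower address. */
          val  = mmio_read32(addr);
          val |= (uint64_t)mmio_read32(addr + 4) << 32;
      }
      return val;
  }

One visible side effect in the diff is that the explicit (uint64_t) casts on
the read side disappear, since io_mem_read() already returns uint64_t, while
the (uint32_t) casts added on the write side simply make the truncation to
the low half explicit.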
Thread overview: 26+ messages
2012-01-02 16:33 [Qemu-devel] [PATCH 00/16] Kill old-style I/O dispatch Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 01/16] memory: move endianness compensation to memory core Avi Kivity
2012-01-05 18:26 ` Andreas Färber
2012-01-07 7:52 ` Andreas Färber
2012-01-02 16:33 ` [Qemu-devel] [PATCH 02/16] exec: make phys_page_find() return a temporary Avi Kivity
2012-01-02 16:33 ` Avi Kivity [this message]
2012-01-02 16:33 ` [Qemu-devel] [PATCH 04/16] memory: remove MemoryRegion::backend_registered Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 05/16] Fix wrong region_offset when overlaying a page with another Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 06/16] Avoid range comparisons on io index types Avi Kivity
2012-01-02 21:58 ` Richard Henderson
2012-01-03 8:46 ` Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 07/16] Uninline get_page_addr_code() Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 08/16] Convert IO_MEM_{RAM, ROM, UNASSIGNED, NOTDIRTY} to MemoryRegions Avi Kivity
2012-01-02 22:16 ` Richard Henderson
2012-01-03 8:54 ` Avi Kivity
2012-01-06 8:31 ` Stefan Hajnoczi
2012-01-02 16:33 ` [Qemu-devel] [PATCH 09/16] Switch cpu_register_physical_memory_log() to use MemoryRegions Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 10/16] Convert the subpage wrapper to be a MemoryRegion Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 11/16] Convert IO_MEM_SUBPAGE_RAM " Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 12/16] Convert io_mem_watch " Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 13/16] Direct dispatch through MemoryRegion Avi Kivity
2012-01-07 7:55 ` Andreas Färber
2012-01-02 16:33 ` [Qemu-devel] [PATCH 14/16] Remove IO_MEM_SUBPAGE Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 15/16] Drop IO_MEM_ROMD Avi Kivity
2012-01-02 16:33 ` [Qemu-devel] [PATCH 16/16] Remove IO_MEM_SHIFT Avi Kivity
2012-01-02 22:36 ` [Qemu-devel] [PATCH 00/16] Kill old-style I/O dispatch Richard Henderson