From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from mailman by lists.gnu.org with tmda-scanned (Exim 4.43) id 1LD3IV-0008Lx-SR for qemu-devel@nongnu.org; Wed, 17 Dec 2008 15:47:12 -0500 Received: from exim by lists.gnu.org with spam-scanned (Exim 4.43) id 1LD3IV-0008LC-2S for qemu-devel@nongnu.org; Wed, 17 Dec 2008 15:47:11 -0500 Received: from [199.232.76.173] (port=35716 helo=monty-python.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1LD3IU-0008L5-MG for qemu-devel@nongnu.org; Wed, 17 Dec 2008 15:47:10 -0500 Received: from mx2.redhat.com ([66.187.237.31]:36329) by monty-python.gnu.org with esmtp (Exim 4.60) (envelope-from ) id 1LD3IT-0008PU-RF for qemu-devel@nongnu.org; Wed, 17 Dec 2008 15:47:10 -0500 From: Glauber Costa Date: Wed, 17 Dec 2008 15:46:59 -0500 Message-Id: <1229546822-11972-3-git-send-email-glommer@redhat.com> In-Reply-To: <1229546822-11972-2-git-send-email-glommer@redhat.com> References: <1229546822-11972-1-git-send-email-glommer@redhat.com> <1229546822-11972-2-git-send-email-glommer@redhat.com> Subject: [Qemu-devel] [PATCH 2/5] isolate io handling routine Reply-To: qemu-devel@nongnu.org List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-devel@nongnu.org Cc: Ian.Jackson@eu.citrix.com, avi@redhat.com, kvm@vger.kernel.org, stefano.stabellini@eu.citrix.com introduce cpu_physical_memory_do_io, which handles the mmio part of cpu_physical_memory_rw. KVM can use it to do mmio, since mmio is essentially the same for both KVM and tcg. 
Signed-off-by: Glauber Costa --- cpu-all.h | 2 + exec.c | 89 ++++++++++++++++++++++++++++++++++-------------------------- 2 files changed, 52 insertions(+), 39 deletions(-) diff --git a/cpu-all.h b/cpu-all.h index 648264c..d46da05 100644 --- a/cpu-all.h +++ b/cpu-all.h @@ -910,6 +910,8 @@ int cpu_register_io_memory(int io_index, CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index); CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index); +int cpu_physical_memory_do_io(target_phys_addr_t addr, uint8_t *buf, int l, + int is_write, unsigned long pd); void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, int len, int is_write); static inline void cpu_physical_memory_read(target_phys_addr_t addr, diff --git a/exec.c b/exec.c index 44f6a42..04eadfe 100644 --- a/exec.c +++ b/exec.c @@ -2891,12 +2891,58 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, } #else +int cpu_physical_memory_do_io(target_phys_addr_t addr, uint8_t *buf, int l, int is_write, unsigned long pd) +{ + int io_index; + uint32_t val; + + io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); + if (is_write) { + /* XXX: could force cpu_single_env to NULL to avoid + potential bugs */ + if (l >= 4 && ((addr & 3) == 0)) { + /* 32 bit write access */ + val = ldl_p(buf); + io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); + l = 4; + } else if (l >= 2 && ((addr & 1) == 0)) { + /* 16 bit write access */ + val = lduw_p(buf); + io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); + l = 2; + } else { + /* 8 bit write access */ + val = ldub_p(buf); + io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); + l = 1; + } + + } else { + if (l >= 4 && ((addr & 3) == 0)) { + /* 32 bit read access */ + val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); + stl_p(buf, val); + l = 4; + } else if (l >= 2 && ((addr & 1) == 0)) { + /* 16 bit read access */ + val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); + stw_p(buf, val); + l = 
2; + } else { + /* 8 bit read access */ + val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); + stb_p(buf, val); + l = 1; + } + } + return l; +} + void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, int len, int is_write) { - int l, io_index; + int l; uint8_t *ptr; - uint32_t val; target_phys_addr_t page; unsigned long pd; PhysPageDesc *p; @@ -2915,27 +2961,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, if (is_write) { if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); if (p) addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; - /* XXX: could force cpu_single_env to NULL to avoid - potential bugs */ - if (l >= 4 && ((addr & 3) == 0)) { - /* 32 bit write access */ - val = ldl_p(buf); - io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); - l = 4; - } else if (l >= 2 && ((addr & 1) == 0)) { - /* 16 bit write access */ - val = lduw_p(buf); - io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); - l = 2; - } else { - /* 8 bit write access */ - val = ldub_p(buf); - io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); - l = 1; - } + l = cpu_physical_memory_do_io(addr, buf, len, is_write, pd); } else { unsigned long addr1; addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); @@ -2953,26 +2981,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, } else { if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { - /* I/O case */ - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); if (p) addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; - if (l >= 4 && ((addr & 3) == 0)) { - /* 32 bit read access */ - val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); - stl_p(buf, val); - l = 4; - } else if (l >= 2 && ((addr & 1) == 0)) { - /* 16 bit read access */ - val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); - stw_p(buf, val); - l = 2; - } else { - /* 8 bit read access */ - 
val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); - stb_p(buf, val); - l = 1; - } + l = cpu_physical_memory_do_io(addr, buf, len, is_write, pd); } else { /* RAM case */ ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + -- 1.5.6.5