From mboxrd@z Thu Jan 1 00:00:00 1970
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
From: Anthony Liguori
Date: Thu, 22 Jan 2009 16:59:12 +0000
Subject: [Qemu-devel] [6394] Add target memory mapping API (Avi Kivity)
Reply-To: qemu-devel@nongnu.org
List-Id: qemu-devel.nongnu.org
To: qemu-devel@nongnu.org

Revision: 6394
          http://svn.sv.gnu.org/viewvc/?view=rev&root=qemu&revision=6394
Author:   aliguori
Date:     2009-01-22 16:59:11 +0000 (Thu, 22 Jan 2009)

Log Message:
-----------
Add target memory mapping API (Avi Kivity)

Devices accessing large amounts of memory (as with DMA) will wish to
obtain a pointer to guest memory rather than access it indirectly via
cpu_physical_memory_rw().  Add a new API to convert target addresses to
host pointers.

In case the target address does not correspond to RAM, a bounce buffer
is allocated.  To prevent the guest from causing the host to allocate
unbounded amounts of bounce buffer, this memory is limited (currently
to one page).

Signed-off-by: Avi Kivity
Signed-off-by: Anthony Liguori

Modified Paths:
--------------
    trunk/cpu-all.h
    trunk/exec.c

Modified: trunk/cpu-all.h
===================================================================
--- trunk/cpu-all.h	2009-01-22 16:18:33 UTC (rev 6393)
+++ trunk/cpu-all.h	2009-01-22 16:59:11 UTC (rev 6394)
@@ -923,6 +923,12 @@
 {
     cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
 }
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write);
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len);
+
 uint32_t ldub_phys(target_phys_addr_t addr);
 uint32_t lduw_phys(target_phys_addr_t addr);
 uint32_t ldl_phys(target_phys_addr_t addr);

Modified: trunk/exec.c
===================================================================
--- trunk/exec.c	2009-01-22 16:18:33 UTC (rev 6393)
+++ trunk/exec.c	2009-01-22 16:59:11 UTC (rev 6394)
@@ -3045,7 +3045,109 @@
     }
 }
 
+typedef struct {
+    void *buffer;
+    target_phys_addr_t addr;
+    target_phys_addr_t len;
+} BounceBuffer;
+
+static BounceBuffer bounce;
+
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ */
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write)
+{
+    target_phys_addr_t len = *plen;
+    target_phys_addr_t done = 0;
+    int l;
+    uint8_t *ret = NULL;
+    uint8_t *ptr;
+    target_phys_addr_t page;
+    unsigned long pd;
+    PhysPageDesc *p;
+    unsigned long addr1;
+
+    while (len > 0) {
+        page = addr & TARGET_PAGE_MASK;
+        l = (page + TARGET_PAGE_SIZE) - addr;
+        if (l > len)
+            l = len;
+        p = phys_page_find(page >> TARGET_PAGE_BITS);
+        if (!p) {
+            pd = IO_MEM_UNASSIGNED;
+        } else {
+            pd = p->phys_offset;
+        }
+
+        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+            if (done || bounce.buffer) {
+                break;
+            }
+            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+            bounce.addr = addr;
+            bounce.len = l;
+            if (!is_write) {
+                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
+            }
+            ptr = bounce.buffer;
+        } else {
+            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+            ptr = phys_ram_base + addr1;
+        }
+        if (!done) {
+            ret = ptr;
+        } else if (ret + done != ptr) {
+            break;
+        }
+
+        len -= l;
+        addr += l;
+        done += l;
+    }
+    *plen = done;
+    return ret;
+}
+
+/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+ * Will also mark the memory as dirty if is_write == 1.  access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ */
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len)
+{
+    if (buffer != bounce.buffer) {
+        if (is_write) {
+            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
+            while (access_len) {
+                unsigned l;
+                l = TARGET_PAGE_SIZE;
+                if (l > access_len)
+                    l = access_len;
+                if (!cpu_physical_memory_is_dirty(addr1)) {
+                    /* invalidate code */
+                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+                    /* set dirty bit */
+                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+                        (0xff & ~CODE_DIRTY_FLAG);
+                }
+                addr1 += l;
+                access_len -= l;
+            }
+        }
+        return;
+    }
+    if (is_write) {
+        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
+    }
+    qemu_free(bounce.buffer);
+    bounce.buffer = NULL;
+}
+
 /* warning: addr must be aligned */
 uint32_t ldl_phys(target_phys_addr_t addr)
 {
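
To illustrate how a device model is expected to consume this API, here is a
minimal usage sketch.  It is not part of the commit: dma_write_to_guest() is
a hypothetical helper name invented for illustration, while
cpu_physical_memory_map(), cpu_physical_memory_unmap() and
cpu_physical_memory_rw() are the real functions from the patch above.  The
sketch shows the intended pattern for a device writing data into guest
memory: loop, because map() may return a shorter length than requested when
the guest-physical range is not contiguous in host memory, and fall back to
the indirect slow path when map() returns NULL because the single bounce
buffer is already in use.

/* Usage sketch only, not part of r6394.  Assumes it sits inside the QEMU
 * tree, where the usual per-target headers (e.g. cpu.h, which pulls in
 * cpu-all.h) are already included by the surrounding file. */

#include <string.h>
#include "cpu.h"

static void dma_write_to_guest(target_phys_addr_t addr,
                               const uint8_t *data,
                               target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!ptr) {
            /* The single bounce buffer is in use; fall back to the
             * indirect slow path for the remainder of the transfer. */
            cpu_physical_memory_rw(addr, (uint8_t *)data, len, 1);
            return;
        }

        /* plen may be smaller than requested; copy only the mapped part. */
        memcpy(ptr, data, plen);

        /* access_len == plen tells unmap how much was actually written,
         * so it can update the dirty bitmap for direct RAM mappings or
         * flush the bounce buffer back to guest memory. */
        cpu_physical_memory_unmap(ptr, plen, 1, plen);

        addr += plen;
        data += plen;
        len  -= plen;
    }
}

A read path would be symmetric with is_write == 0, in which case map() fills
the bounce buffer from guest memory up front.  Note the restriction in the
header comment: a single mapping must be used for reads or writes only,
never for read-modify-write, since a bounce-buffered mapping is only copied
in one direction.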