From: Anthony Liguori
Date: Mon, 14 May 2012 19:49:16 -0500
Message-ID: <4FB1A80C.1010103@codemonkey.ws>
In-Reply-To: <1336625347-10169-9-git-send-email-benh@kernel.crashing.org>
Subject: Re: [Qemu-devel] [PATCH 08/13] iommu: Introduce IOMMU emulation infrastructure
To: Benjamin Herrenschmidt
Cc: Richard Henderson, "Michael S. Tsirkin", qemu-devel@nongnu.org,
    David Gibson, Eduard - Gabriel Munteanu

On 05/09/2012 11:49 PM, Benjamin Herrenschmidt wrote:
> From: David Gibson
>
> This patch adds the basic infrastructure necessary to emulate an IOMMU
> visible to the guest. The DMAContext structure is extended with
> information and a callback describing the translation, and the various
> DMA functions used by devices will now perform IOMMU translation using
> this callback.
>
> Cc: Michael S. Tsirkin
> Cc: Richard Henderson
>
> Signed-off-by: Eduard - Gabriel Munteanu
> Signed-off-by: David Gibson
> Signed-off-by: Benjamin Herrenschmidt
> ---
>  dma-helpers.c |  214 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  dma.h         |  108 ++++++++++++++++++++++-------
>  hw/qdev-dma.h |    4 +-
>  3 files changed, 299 insertions(+), 27 deletions(-)
>
> diff --git a/dma-helpers.c b/dma-helpers.c
> index 2dc4691..09591ef 100644
> --- a/dma-helpers.c
> +++ b/dma-helpers.c
> @@ -9,6 +9,10 @@
>
>  #include "dma.h"
>  #include "trace.h"
> +#include "range.h"
> +#include "qemu-thread.h"
> +
> +/* #define DEBUG_IOMMU */
>
>  void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
>  {
> @@ -244,3 +248,213 @@ void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
>  {
>      bdrv_acct_start(bs, cookie, sg->size, type);
>  }
> +
> +bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
> +                            DMADirection dir)
> +{
> +    target_phys_addr_t paddr, plen;
> +
> +#ifdef DEBUG_IOMMU
> +    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
> +            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
> +#endif
> +
> +    while (len) {
> +        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
> +            return false;
> +        }
> +
> +        /* The translation might be valid for larger regions. */
> +        if (plen > len) {
> +            plen = len;
> +        }
> +
> +        len -= plen;
> +        addr += plen;
> +    }
> +
> +    return true;
> +}
> +
> +int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
> +                        void *buf, dma_addr_t len, DMADirection dir)
> +{
> +    target_phys_addr_t paddr, plen;
> +    int err;
> +
> +#ifdef DEBUG_IOMMU
> +    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
> +            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
> +#endif
> +
> +    while (len) {
> +        err = dma->translate(dma, addr, &paddr, &plen, dir);
> +        if (err) {
> +            return -1;
> +        }
> +
> +        /* The translation might be valid for larger regions. */
> +        if (plen > len) {
> +            plen = len;
> +        }
> +
> +        cpu_physical_memory_rw(paddr, buf, plen,
> +                               dir == DMA_DIRECTION_FROM_DEVICE);
> +
> +        len -= plen;
> +        addr += plen;
> +        buf += plen;
> +    }
> +
> +    return 0;
> +}
> +
> +int iommu_dma_memory_zero(DMAContext *dma, dma_addr_t addr, dma_addr_t len)
> +{
> +    target_phys_addr_t paddr, plen;
> +    int err;
> +
> +#ifdef DEBUG_IOMMU
> +    fprintf(stderr, "dma_memory_zero context=%p addr=0x" DMA_ADDR_FMT
> +            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
> +#endif
> +
> +    while (len) {
> +        err = dma->translate(dma, addr, &paddr, &plen,
> +                             DMA_DIRECTION_FROM_DEVICE);
> +        if (err) {
> +            return err;
> +        }
> +
> +        /* The translation might be valid for larger regions. */
> +        if (plen > len) {
> +            plen = len;
> +        }
> +
> +        cpu_physical_memory_zero(paddr, plen);
> +
> +        len -= plen;
> +        addr += plen;
> +    }
> +
> +    return 0;
> +}
> +
> +typedef struct {
> +    unsigned long count;
> +    QemuCond cond;
> +} DMAInvalidationState;
> +
> +typedef struct DMAMemoryMap DMAMemoryMap;
> +struct DMAMemoryMap {
> +    dma_addr_t addr;
> +    size_t len;
> +    void *buf;
> +
> +    DMAInvalidationState *invalidate;
> +    QLIST_ENTRY(DMAMemoryMap) list;
> +};
> +
> +void dma_context_init(DMAContext *dma, DMATranslateFunc fn)
> +{
> +#ifdef DEBUG_IOMMU
> +    fprintf(stderr, "dma_context_init(%p, %p)\n", dma, fn);
> +#endif
> +    dma->translate = fn;
> +    QLIST_INIT(&dma->memory_maps);
> +}
> +
> +void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
> +                           DMADirection dir)
> +{
> +    int err;
> +    target_phys_addr_t paddr, plen;
> +    void *buf;
> +    DMAMemoryMap *map;
> +
> +    plen = *len;
> +    err = dma->translate(dma, addr, &paddr, &plen, dir);
> +    if (err) {
> +        return NULL;
> +    }
> +
> +    /*
> +     * If this is true, the virtual region is contiguous,
> +     * but the translated physical region isn't. We just
> +     * clamp *len, much like cpu_physical_memory_map() does.
> +     */
> +    if (plen < *len) {
> +        *len = plen;
> +    }
> +
> +    buf = cpu_physical_memory_map(paddr, &plen,
> +                                  dir == DMA_DIRECTION_FROM_DEVICE);
> +    *len = plen;
> +
> +    /* We treat maps as remote TLBs to cope with stuff like AIO. */
> +    map = g_malloc(sizeof(DMAMemoryMap));
> +    map->addr = addr;
> +    map->len = *len;
> +    map->buf = buf;
> +    map->invalidate = NULL;
> +
> +    QLIST_INSERT_HEAD(&dma->memory_maps, map, list);
> +
> +    return buf;
> +}
> +
> +void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
> +                            DMADirection dir, dma_addr_t access_len)
> +{
> +    DMAMemoryMap *map;
> +
> +    cpu_physical_memory_unmap(buffer, len,
> +                              dir == DMA_DIRECTION_FROM_DEVICE,
> +                              access_len);
> +
> +    QLIST_FOREACH(map, &dma->memory_maps, list) {
> +        if ((map->buf == buffer) && (map->len == len)) {
> +            QLIST_REMOVE(map, list);
> +
> +            if (map->invalidate) {
> +                /* If this mapping was invalidated */
> +                if (--map->invalidate->count == 0) {
> +                    /* And we're the last mapping invalidated at the time */
> +                    /* Then wake up whoever was waiting for the
> +                     * invalidation to complete */
> +                    qemu_cond_signal(&map->invalidate->cond);
> +                }
> +            }
> +
> +            free(map);
> +        }
> +    }
> +
> +
> +    /* unmap called on a buffer that wasn't mapped */
> +    assert(false);
> +}
> +
> +extern QemuMutex qemu_global_mutex;
> +
> +void iommu_wait_for_invalidated_maps(DMAContext *dma,
> +                                     dma_addr_t addr, dma_addr_t len)
> +{
> +    DMAMemoryMap *map;
> +    DMAInvalidationState is;
> +
> +    is.count = 0;
> +    qemu_cond_init(&is.cond);
> +
> +    QLIST_FOREACH(map, &dma->memory_maps, list) {
> +        if (ranges_overlap(addr, len, map->addr, map->len)) {
> +            is.count++;
> +            map->invalidate = &is;
> +        }
> +    }
> +
> +    if (is.count) {
> +        qemu_cond_wait(&is.cond, &qemu_global_mutex);
> +    }
> +    assert(is.count == 0);
> +}

I don't get what's going on here but I don't think it can possibly be right.

What is the purpose of this function?

Regards,

Anthony Liguori
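
For context, a minimal, purely illustrative sketch of the translate callback
contract discussed above. The signature is inferred from the call sites in the
patch (dma->translate(dma, addr, &paddr, &plen, dir)); the ToyIOMMUContext
layout, its window fields, and the fixed-offset mapping are assumptions for
illustration only and are not part of the patch. Permission checks based on
'dir' are omitted.

/* Illustrative only: a toy IOMMU exposing one contiguous DMA window that is
 * translated by a fixed offset into guest physical memory. */
typedef struct ToyIOMMUContext {
    DMAContext dma;                /* embedded first so the callback can upcast */
    dma_addr_t window_start;       /* first valid DMA (bus) address */
    dma_addr_t window_size;        /* size of the valid window */
    target_phys_addr_t phys_base;  /* guest-physical base the window maps to */
} ToyIOMMUContext;

static int toy_iommu_translate(DMAContext *dma, dma_addr_t addr,
                               target_phys_addr_t *paddr,
                               target_phys_addr_t *plen,
                               DMADirection dir)
{
    ToyIOMMUContext *t = (ToyIOMMUContext *)dma;

    if (addr < t->window_start ||
        addr - t->window_start >= t->window_size) {
        return -1;                 /* outside the window: translation fault */
    }

    *paddr = t->phys_base + (addr - t->window_start);
    /* Report validity up to the end of the window; callers clamp to their own
     * length, as the helpers do with "if (plen > len)". */
    *plen = t->window_size - (addr - t->window_start);

    return 0;
}

A device model would then register the callback with the dma_context_init()
added by the patch, e.g. dma_context_init(&t->dma, toy_iommu_translate), and
pass &t->dma to the dma_memory_*() helpers.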