From: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
To: mst@redhat.com
Cc: aliguori@us.ibm.com, david@gibson.dropbear.id.au,
kvm@vger.kernel.org, rth@twiddle.net, aik@ozlabs.ru,
joro@8bytes.org, seabios@seabios.org, qemu-devel@nongnu.org,
agraf@suse.de, blauwirbel@gmail.com, yamahata@valinux.co.jp,
kevin@koconnor.net, avi@redhat.com,
Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>,
dwg@au1.ibm.com, paul@codesourcery.com
Subject: [Qemu-devel] [RFC PATCH 04/13] ide: use the DMA memory access interface for PCI IDE controllers
Date: Wed, 1 Jun 2011 04:38:26 +0300 [thread overview]
Message-ID: <1306892315-7306-5-git-send-email-eduard.munteanu@linux360.ro> (raw)
In-Reply-To: <1306892315-7306-1-git-send-email-eduard.munteanu@linux360.ro>
Emulated PCI IDE controllers now use the memory access interface. This
also allows an emulated IOMMU to translate and check accesses.
Map invalidation results in cancelling in-flight DMA transfers. Since the
guest OS can't properly recover DMA results when a mapping is changed
mid-transfer, this is a fairly good approximation.
Note this doesn't handle AHCI emulation yet!
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
---
dma-helpers.c | 23 ++++++++++++++++++-----
dma.h | 4 +++-
hw/ide/ahci.c | 3 ++-
hw/ide/internal.h | 1 +
hw/ide/macio.c | 4 ++--
hw/ide/pci.c | 18 +++++++++++-------
6 files changed, 37 insertions(+), 16 deletions(-)
diff --git a/dma-helpers.c b/dma-helpers.c
index 712ed89..29a74a4 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -10,12 +10,13 @@
#include "dma.h"
#include "block_int.h"
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMADevice *dma)
{
qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
qsg->nsg = 0;
qsg->nalloc = alloc_hint;
qsg->size = 0;
+ qsg->dma = dma;
}
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
@@ -73,12 +74,23 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
int i;
for (i = 0; i < dbs->iov.niov; ++i) {
- cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
- dbs->iov.iov[i].iov_len, !dbs->is_write,
- dbs->iov.iov[i].iov_len);
+ dma_memory_unmap(dbs->sg->dma,
+ dbs->iov.iov[i].iov_base,
+ dbs->iov.iov[i].iov_len, !dbs->is_write,
+ dbs->iov.iov[i].iov_len);
}
}
+static void dma_bdrv_cancel(void *opaque)
+{
+ DMAAIOCB *dbs = opaque;
+
+ bdrv_aio_cancel(dbs->acb);
+ dma_bdrv_unmap(dbs);
+ qemu_iovec_destroy(&dbs->iov);
+ qemu_aio_release(dbs);
+}
+
static void dma_bdrv_cb(void *opaque, int ret)
{
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
@@ -100,7 +112,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
while (dbs->sg_cur_index < dbs->sg->nsg) {
cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
- mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+ mem = dma_memory_map(dbs->sg->dma, dma_bdrv_cancel, dbs,
+ cur_addr, &cur_len, !dbs->is_write);
if (!mem)
break;
qemu_iovec_add(&dbs->iov, mem, cur_len);
diff --git a/dma.h b/dma.h
index f3bb275..2417b32 100644
--- a/dma.h
+++ b/dma.h
@@ -14,6 +14,7 @@
//#include "cpu.h"
#include "hw/hw.h"
#include "block.h"
+#include "hw/dma_rw.h"
typedef struct {
target_phys_addr_t base;
@@ -25,9 +26,10 @@ typedef struct {
int nsg;
int nalloc;
target_phys_addr_t size;
+ DMADevice *dma;
} QEMUSGList;
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMADevice *dma);
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
target_phys_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index c6e0c77..68b87d2 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -680,7 +680,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist)
if (sglist_alloc_hint > 0) {
AHCI_SG *tbl = (AHCI_SG *)prdt;
- qemu_sglist_init(sglist, sglist_alloc_hint);
+ /* FIXME: pass a proper DMADevice. */
+ qemu_sglist_init(sglist, sglist_alloc_hint, NULL);
for (i = 0; i < sglist_alloc_hint; i++) {
/* flags_size is zero-based */
qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index aa198b6..b830d67 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -474,6 +474,7 @@ struct IDEDMA {
struct iovec iov;
QEMUIOVector qiov;
BlockDriverAIOCB *aiocb;
+ DMADevice *dev;
};
struct IDEBus {
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 7107f6b..a111481 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -78,7 +78,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
s->io_buffer_size = io->len;
- qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
+ qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
qemu_sglist_add(&s->sg, io->addr, io->len);
io->addr += io->len;
io->len = 0;
@@ -140,7 +140,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
s->io_buffer_index = 0;
s->io_buffer_size = io->len;
- qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
+ qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
qemu_sglist_add(&s->sg, io->addr, io->len);
io->addr += io->len;
io->len = 0;
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 65cb56c..a14f2ae 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -63,7 +63,8 @@ static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
} prd;
int l, len;
- qemu_sglist_init(&s->sg, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
+ qemu_sglist_init(&s->sg,
+ s->nsector / (BMDMA_PAGE_SIZE / 512) + 1, dma->dev);
s->io_buffer_size = 0;
for(;;) {
if (bm->cur_prd_len == 0) {
@@ -71,7 +72,7 @@ static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
if (bm->cur_prd_last ||
(bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
return s->io_buffer_size != 0;
- cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+ dma_memory_read(dma->dev, bm->cur_addr, (uint8_t *)&prd, 8);
bm->cur_addr += 8;
prd.addr = le32_to_cpu(prd.addr);
prd.size = le32_to_cpu(prd.size);
@@ -113,7 +114,7 @@ static int bmdma_rw_buf(IDEDMA *dma, int is_write)
if (bm->cur_prd_last ||
(bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
return 0;
- cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+ dma_memory_read(dma->dev, bm->cur_addr, (uint8_t *)&prd, 8);
bm->cur_addr += 8;
prd.addr = le32_to_cpu(prd.addr);
prd.size = le32_to_cpu(prd.size);
@@ -128,11 +129,11 @@ static int bmdma_rw_buf(IDEDMA *dma, int is_write)
l = bm->cur_prd_len;
if (l > 0) {
if (is_write) {
- cpu_physical_memory_write(bm->cur_prd_addr,
- s->io_buffer + s->io_buffer_index, l);
+ dma_memory_write(dma->dev, bm->cur_prd_addr,
+ s->io_buffer + s->io_buffer_index, l);
} else {
- cpu_physical_memory_read(bm->cur_prd_addr,
- s->io_buffer + s->io_buffer_index, l);
+ dma_memory_read(dma->dev, bm->cur_prd_addr,
+ s->io_buffer + s->io_buffer_index, l);
}
bm->cur_prd_addr += l;
bm->cur_prd_len -= l;
@@ -436,6 +437,9 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
continue;
ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
}
+
+ d->bmdma[0].dma.dev = &dev->dma;
+ d->bmdma[1].dma.dev = &dev->dma;
}
static const struct IDEDMAOps bmdma_ops = {
--
1.7.3.4
next prev parent reply other threads:[~2011-06-01 1:39 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-06-01 1:38 [Qemu-devel] [RFC PATCH 00/13] AMD IOMMU emulation patches, another try Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 01/13] Generic DMA memory access interface Eduard - Gabriel Munteanu
2011-06-01 14:01 ` Richard Henderson
2011-06-01 14:29 ` Avi Kivity
2011-06-01 15:16 ` Richard Henderson
2011-06-02 10:22 ` David Gibson
2011-06-01 14:52 ` Eduard - Gabriel Munteanu
2011-06-01 15:09 ` Richard Henderson
2011-06-01 15:35 ` Eduard - Gabriel Munteanu
2011-06-01 15:45 ` Richard Henderson
2011-06-02 9:38 ` David Gibson
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 02/13] pci: add IOMMU support via the generic DMA layer Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 03/13] AMD IOMMU emulation Eduard - Gabriel Munteanu
2011-06-01 1:38 ` Eduard - Gabriel Munteanu [this message]
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 05/13] rtl8139: use the DMA memory access interface Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 06/13] eepro100: " Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 07/13] ac97: " Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 08/13] es1370: " Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 09/13] e1000: " Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 10/13] lsi53c895a: " Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 11/13] pcnet: " Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 12/13] usb-uhci: " Eduard - Gabriel Munteanu
2011-06-01 1:38 ` [Qemu-devel] [RFC PATCH 13/13] usb-ohci: " Eduard - Gabriel Munteanu
2011-06-01 18:49 ` [Qemu-devel] [RFC PATCH 00/13] AMD IOMMU emulation patches, another try Richard Henderson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1306892315-7306-5-git-send-email-eduard.munteanu@linux360.ro \
--to=eduard.munteanu@linux360.ro \
--cc=agraf@suse.de \
--cc=aik@ozlabs.ru \
--cc=aliguori@us.ibm.com \
--cc=avi@redhat.com \
--cc=blauwirbel@gmail.com \
--cc=david@gibson.dropbear.id.au \
--cc=dwg@au1.ibm.com \
--cc=joro@8bytes.org \
--cc=kevin@koconnor.net \
--cc=kvm@vger.kernel.org \
--cc=mst@redhat.com \
--cc=paul@codesourcery.com \
--cc=qemu-devel@nongnu.org \
--cc=rth@twiddle.net \
--cc=seabios@seabios.org \
--cc=yamahata@valinux.co.jp \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).