* [Qemu-devel] PATCH, RFC: Generic DMA framework
@ 2007-08-14 19:48 Blue Swirl
2007-08-16 18:18 ` [Qemu-devel] " Blue Swirl
2007-08-24 19:40 ` Blue Swirl
0 siblings, 2 replies; 20+ messages in thread
From: Blue Swirl @ 2007-08-14 19:48 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 215 bytes --]
Hi,
The first patch implements a simple generic DMA framework. The next
patches convert first the Sparc32 IOMMU and then ESP and Lance.
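To give an idea of the intended usage (the bus and device names below
are made up for illustration and are not part of the patches): a bus or
IOMMU registers a handler with qemu_init_dma(), and devices then go
through the resulting qemu_dma handle instead of calling
cpu_physical_memory_rw() directly.

/* Illustration only; "mybus" and "mydev" are invented names. */
static void mybus_memory_rw(void *opaque, target_phys_addr_t addr,
                            uint8_t *buf, int len, int is_write)
{
    /* Simplest possible parent handler: access physical memory. */
    cpu_physical_memory_rw(addr, buf, len, is_write);
}

static void mydev_fetch(qemu_dma *dma, target_phys_addr_t addr,
                        uint8_t *buf, int len)
{
    /* The device neither knows nor cares what sits behind the handle. */
    dma_memory_read(dma, addr, buf, len);
}

/* Board code wires it up once:
 *   qemu_dma *bus_dma = qemu_init_dma(mybus_memory_rw, NULL);
 */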
Would the framework need any changes to support other targets? Comments welcome.
[-- Attachment #2: gdma.diff --]
[-- Type: text/x-diff, Size: 1635 bytes --]
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-14 19:25:52.000000000 +0000
+++ qemu/vl.h 2007-08-14 19:26:34.000000000 +0000
@@ -734,6 +734,50 @@
#include "hw/irq.h"
+/* Generic DMA API */
+typedef void (*qemu_dma_handler)(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write);
+
+typedef struct QEMUDMAState {
+ qemu_dma_handler handler;
+ void *opaque;
+} qemu_dma;
+
+static inline void dma_memory_read(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, 0);
+}
+
+static inline void dma_memory_write(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, 1);
+}
+
+static inline void dma_memory_rw(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, is_write);
+}
+
+static inline qemu_dma *qemu_init_dma(qemu_dma_handler handler, void *opaque)
+{
+ qemu_dma *s;
+
+ s = (qemu_dma *)qemu_mallocz(sizeof(qemu_dma));
+ s->handler = handler;
+ s->opaque = opaque;
+
+ return s;
+}
+
/* ISA bus */
extern target_phys_addr_t isa_mem_base;
[-- Attachment #3: sparc_gdma.diff --]
[-- Type: text/x-diff, Size: 3656 bytes --]
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c 2007-08-14 19:26:56.000000000 +0000
+++ qemu/hw/sun4m.c 2007-08-14 19:37:20.000000000 +0000
@@ -289,6 +289,12 @@
slavio_set_power_fail(slavio_misc, 1);
}
+static void sparc_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
static void main_cpu_reset(void *opaque)
{
CPUState *env = opaque;
@@ -315,6 +321,7 @@
const sparc_def_t *def;
qemu_irq *cpu_irqs[MAX_CPUS], *slavio_irq, *slavio_cpu_irq,
*espdma_irq, *ledma_irq;
+ qemu_dma *physical_dma, *dvma;
/* init CPUs */
sparc_find_by_name(cpu_model, &def);
@@ -343,7 +350,10 @@
/* allocate RAM */
cpu_register_physical_memory(0, RAM_size, 0);
- iommu = iommu_init(hwdef->iommu_base);
+ physical_dma = qemu_init_dma(sparc_dma_memory_rw, NULL);
+
+ iommu = iommu_init(hwdef->iommu_base, physical_dma, &dvma);
+
slavio_intctl = slavio_intctl_init(hwdef->intctl_base,
hwdef->intctl_base + 0x10000ULL,
&hwdef->intbit_to_level[0],
Index: qemu/hw/iommu.c
===================================================================
--- qemu.orig/hw/iommu.c 2007-08-14 19:27:35.000000000 +0000
+++ qemu/hw/iommu.c 2007-08-14 19:36:33.000000000 +0000
@@ -104,6 +104,7 @@
target_phys_addr_t addr;
uint32_t regs[IOMMU_NREGS];
target_phys_addr_t iostart;
+ qemu_dma *parent_dma;
} IOMMUState;
static uint32_t iommu_mem_readw(void *opaque, target_phys_addr_t addr)
@@ -245,6 +246,7 @@
void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int is_write)
{
+ IOMMUState *s = opaque;
int l;
uint32_t flags;
target_phys_addr_t page, phys_addr;
@@ -265,10 +267,8 @@
iommu_bad_addr(opaque, page, is_write);
return;
}
- cpu_physical_memory_write(phys_addr, buf, len);
- } else {
- cpu_physical_memory_read(phys_addr, buf, len);
}
+ dma_memory_rw(s->parent_dma, phys_addr, buf, len, is_write);
len -= l;
buf += l;
addr += l;
@@ -309,7 +309,8 @@
s->regs[IOMMU_CTRL] = IOMMU_VERSION;
}
-void *iommu_init(target_phys_addr_t addr)
+void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
+ qemu_dma **dvma)
{
IOMMUState *s;
int iommu_io_memory;
@@ -322,7 +323,10 @@
iommu_io_memory = cpu_register_io_memory(0, iommu_mem_read, iommu_mem_write, s);
cpu_register_physical_memory(addr, IOMMU_NREGS * 4, iommu_io_memory);
-
+
+ s->parent_dma = parent_dma;
+ *dvma = qemu_init_dma(sparc_iommu_memory_rw, s);
+
register_savevm("iommu", addr, 2, iommu_save, iommu_load, s);
qemu_register_reset(iommu_reset, s);
return s;
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-14 19:27:35.000000000 +0000
+++ qemu/vl.h 2007-08-14 19:36:54.000000000 +0000
@@ -1263,7 +1263,8 @@
extern QEMUMachine ss5_machine, ss10_machine;
/* iommu.c */
-void *iommu_init(target_phys_addr_t addr);
+void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
+ qemu_dma **dvma);
void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int is_write);
static inline void sparc_iommu_memory_read(void *opaque,
[-- Attachment #4: sparc32_dma_esp_le_to_gdma.diff --]
[-- Type: text/x-diff, Size: 13719 bytes --]
Index: qemu/hw/sparc32_dma.c
===================================================================
--- qemu.orig/hw/sparc32_dma.c 2007-08-14 19:25:42.000000000 +0000
+++ qemu/hw/sparc32_dma.c 2007-08-14 19:39:11.000000000 +0000
@@ -58,63 +58,12 @@
struct DMAState {
uint32_t dmaregs[DMA_REGS];
qemu_irq irq;
- void *iommu, *dev_opaque;
+ void *dev_opaque;
void (*dev_reset)(void *dev_opaque);
qemu_irq *pic;
+ qemu_dma *dma;
};
-/* Note: on sparc, the lance 16 bit bus is swapped */
-void ledma_memory_read(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap)
-{
- DMAState *s = opaque;
- int i;
-
- DPRINTF("DMA write, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- addr |= s->dmaregs[3];
- if (do_bswap) {
- sparc_iommu_memory_read(s->iommu, addr, buf, len);
- } else {
- addr &= ~1;
- len &= ~1;
- sparc_iommu_memory_read(s->iommu, addr, buf, len);
- for(i = 0; i < len; i += 2) {
- bswap16s((uint16_t *)(buf + i));
- }
- }
-}
-
-void ledma_memory_write(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap)
-{
- DMAState *s = opaque;
- int l, i;
- uint16_t tmp_buf[32];
-
- DPRINTF("DMA read, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- addr |= s->dmaregs[3];
- if (do_bswap) {
- sparc_iommu_memory_write(s->iommu, addr, buf, len);
- } else {
- addr &= ~1;
- len &= ~1;
- while (len > 0) {
- l = len;
- if (l > sizeof(tmp_buf))
- l = sizeof(tmp_buf);
- for(i = 0; i < l; i += 2) {
- tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
- }
- sparc_iommu_memory_write(s->iommu, addr, (uint8_t *)tmp_buf, l);
- len -= l;
- buf += l;
- addr += l;
- }
- }
-}
-
static void dma_set_irq(void *opaque, int irq, int level)
{
DMAState *s = opaque;
@@ -129,24 +78,29 @@
}
}
-void espdma_memory_read(void *opaque, uint8_t *buf, int len)
+static void ledma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
{
DMAState *s = opaque;
- DPRINTF("DMA read, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- sparc_iommu_memory_read(s->iommu, s->dmaregs[1], buf, len);
- s->dmaregs[0] |= DMA_INTR;
- s->dmaregs[1] += len;
+ DPRINTF("DMA %s, direction: %c, addr 0x%8.8x\n",
+ is_write ? "write" : "read",
+ s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', addr);
+ addr |= s->dmaregs[3];
+ addr &= ~1;
+ len &= ~1;
+ dma_memory_rw(s->dma, addr, buf, len, is_write);
}
-void espdma_memory_write(void *opaque, uint8_t *buf, int len)
+static void espdma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
{
DMAState *s = opaque;
- DPRINTF("DMA write, direction: %c, addr 0x%8.8x\n",
+ DPRINTF("DMA %s, direction: %c, addr 0x%8.8x\n",
+ is_write ? "write" : "read",
s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- sparc_iommu_memory_write(s->iommu, s->dmaregs[1], buf, len);
+ dma_memory_rw(s->dma, s->dmaregs[1], buf, len, is_write);
s->dmaregs[0] |= DMA_INTR;
s->dmaregs[1] += len;
}
@@ -238,7 +192,8 @@
}
void *sparc32_dma_init(target_phys_addr_t daddr, qemu_irq parent_irq,
- void *iommu, qemu_irq **dev_irq)
+ qemu_irq **dev_irq, qemu_dma *parent_dma,
+ qemu_dma **dev_dma, int is_espdma)
{
DMAState *s;
int dma_io_memory;
@@ -248,7 +203,7 @@
return NULL;
s->irq = parent_irq;
- s->iommu = iommu;
+ s->dma = parent_dma;
dma_io_memory = cpu_register_io_memory(0, dma_mem_read, dma_mem_write, s);
cpu_register_physical_memory(daddr, DMA_SIZE, dma_io_memory);
@@ -257,6 +212,11 @@
qemu_register_reset(dma_reset, s);
*dev_irq = qemu_allocate_irqs(dma_set_irq, s, 1);
+ if (is_espdma)
+ *dev_dma = qemu_init_dma(espdma_memory_rw, s);
+ else
+ *dev_dma = qemu_init_dma(ledma_memory_rw, s);
+
return s;
}
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c 2007-08-14 19:37:20.000000000 +0000
+++ qemu/hw/sun4m.c 2007-08-14 19:39:11.000000000 +0000
@@ -321,7 +321,7 @@
const sparc_def_t *def;
qemu_irq *cpu_irqs[MAX_CPUS], *slavio_irq, *slavio_cpu_irq,
*espdma_irq, *ledma_irq;
- qemu_dma *physical_dma, *dvma;
+ qemu_dma *physical_dma, *dvma, *esp_dvma, *le_dvma;
/* init CPUs */
sparc_find_by_name(cpu_model, &def);
@@ -362,9 +362,11 @@
hwdef->clock_irq);
espdma = sparc32_dma_init(hwdef->dma_base, slavio_irq[hwdef->esp_irq],
- iommu, &espdma_irq);
+ &espdma_irq, dvma, &esp_dvma, 1);
+
ledma = sparc32_dma_init(hwdef->dma_base + 16ULL,
- slavio_irq[hwdef->le_irq], iommu, &ledma_irq);
+ slavio_irq[hwdef->le_irq], &ledma_irq, dvma,
+ &le_dvma, 0);
if (graphic_depth != 8 && graphic_depth != 24) {
fprintf(stderr, "qemu: Unsupported depth: %d\n", graphic_depth);
@@ -375,7 +377,7 @@
if (nd_table[0].model == NULL
|| strcmp(nd_table[0].model, "lance") == 0) {
- lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq);
+ lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq, le_dvma);
} else if (strcmp(nd_table[0].model, "?") == 0) {
fprintf(stderr, "qemu: Supported NICs: lance\n");
exit (1);
@@ -399,7 +401,9 @@
slavio_serial_init(hwdef->serial_base, slavio_irq[hwdef->ser_irq],
serial_hds[1], serial_hds[0]);
fdctrl_init(slavio_irq[hwdef->fd_irq], 0, 1, hwdef->fd_base, fd_table);
- main_esp = esp_init(bs_table, hwdef->esp_base, espdma, *espdma_irq);
+
+ main_esp = esp_init(bs_table, hwdef->esp_base, espdma, *espdma_irq,
+ esp_dvma);
for (i = 0; i < MAX_DISKS; i++) {
if (bs_table[i]) {
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-14 19:36:54.000000000 +0000
+++ qemu/vl.h 2007-08-14 19:39:45.000000000 +0000
@@ -1098,7 +1098,7 @@
void pci_pcnet_init(PCIBus *bus, NICInfo *nd, int devfn);
void lance_init(NICInfo *nd, target_phys_addr_t leaddr, void *dma_opaque,
- qemu_irq irq);
+ qemu_irq irq, qemu_dma *parent_dma);
/* vmmouse.c */
void *vmmouse_init(void *m);
@@ -1265,21 +1265,6 @@
/* iommu.c */
void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
qemu_dma **dvma);
-void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int is_write);
-static inline void sparc_iommu_memory_read(void *opaque,
- target_phys_addr_t addr,
- uint8_t *buf, int len)
-{
- sparc_iommu_memory_rw(opaque, addr, buf, len, 0);
-}
-
-static inline void sparc_iommu_memory_write(void *opaque,
- target_phys_addr_t addr,
- uint8_t *buf, int len)
-{
- sparc_iommu_memory_rw(opaque, addr, buf, len, 1);
-}
/* tcx.c */
void tcx_init(DisplayState *ds, target_phys_addr_t addr, uint8_t *vram_base,
@@ -1318,17 +1303,12 @@
/* esp.c */
void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id);
void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
- void *dma_opaque, qemu_irq irq);
+ void *dma_opaque, qemu_irq irq, qemu_dma *parent_dma);
/* sparc32_dma.c */
void *sparc32_dma_init(target_phys_addr_t daddr, qemu_irq parent_irq,
- void *iommu, qemu_irq **dev_irq);
-void ledma_memory_read(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap);
-void ledma_memory_write(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap);
-void espdma_memory_read(void *opaque, uint8_t *buf, int len);
-void espdma_memory_write(void *opaque, uint8_t *buf, int len);
+ qemu_irq **dev_irq, qemu_dma *parent_dma,
+ qemu_dma **dev_dma, int is_espdma);
void sparc32_dma_set_reset_data(void *opaque, void (*dev_reset)(void *opaque),
void *dev_opaque);
Index: qemu/hw/esp.c
===================================================================
--- qemu.orig/hw/esp.c 2007-08-14 19:25:42.000000000 +0000
+++ qemu/hw/esp.c 2007-08-14 19:39:11.000000000 +0000
@@ -52,6 +52,7 @@
struct ESPState {
qemu_irq irq;
+ qemu_dma *parent_dma;
BlockDriverState **bd;
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
@@ -73,7 +74,6 @@
uint32_t dma_counter;
uint8_t *async_buf;
uint32_t async_len;
- void *dma_opaque;
};
#define STAT_DO 0x00
@@ -105,7 +105,7 @@
target = s->wregs[4] & 7;
DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
if (s->dma) {
- espdma_memory_read(s->dma_opaque, buf, dmalen);
+ dma_memory_read(s->parent_dma, 0, buf, dmalen);
} else {
buf[0] = 0;
memcpy(&buf[1], s->ti_buf, dmalen);
@@ -189,7 +189,7 @@
s->ti_buf[0] = s->sense;
s->ti_buf[1] = 0;
if (s->dma) {
- espdma_memory_write(s->dma_opaque, s->ti_buf, 2);
+ dma_memory_write(s->parent_dma, 0, s->ti_buf, 2);
s->rregs[4] = STAT_IN | STAT_TC | STAT_ST;
s->rregs[5] = INTR_BS | INTR_FC;
s->rregs[6] = SEQ_CD;
@@ -222,7 +222,7 @@
len = s->dma_left;
if (s->do_cmd) {
DPRINTF("command len %d + %d\n", s->cmdlen, len);
- espdma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
+ dma_memory_read(s->parent_dma, 0, &s->cmdbuf[s->cmdlen], len);
s->ti_size = 0;
s->cmdlen = 0;
s->do_cmd = 0;
@@ -237,9 +237,9 @@
len = s->async_len;
}
if (to_device) {
- espdma_memory_read(s->dma_opaque, s->async_buf, len);
+ dma_memory_read(s->parent_dma, 0, s->async_buf, len);
} else {
- espdma_memory_write(s->dma_opaque, s->async_buf, len);
+ dma_memory_write(s->parent_dma, 0, s->async_buf, len);
}
s->dma_left -= len;
s->async_buf += len;
@@ -569,7 +569,7 @@
}
void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
- void *dma_opaque, qemu_irq irq)
+ void *dma_opaque, qemu_irq irq, qemu_dma *parent_dma)
{
ESPState *s;
int esp_io_memory;
@@ -580,7 +580,7 @@
s->bd = bd;
s->irq = irq;
- s->dma_opaque = dma_opaque;
+ s->parent_dma = parent_dma;
sparc32_dma_set_reset_data(dma_opaque, esp_reset, s);
esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
Index: qemu/hw/pcnet.c
===================================================================
--- qemu.orig/hw/pcnet.c 2007-08-14 19:25:42.000000000 +0000
+++ qemu/hw/pcnet.c 2007-08-14 19:39:11.000000000 +0000
@@ -75,6 +75,7 @@
void (*phys_mem_write)(void *dma_opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int do_bswap);
void *dma_opaque;
+ qemu_dma *parent_dma;
};
struct qemu_ether_header {
@@ -2011,6 +2012,46 @@
#if defined (TARGET_SPARC) && !defined(TARGET_SPARC64) // Avoid compile failure
+/* Note: on sparc, the lance 16 bit bus is swapped */
+static void ledma_memory_read(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ int i;
+
+ if (do_bswap) {
+ dma_memory_read(opaque, addr, buf, len);
+ } else {
+ dma_memory_read(opaque, addr, buf, len);
+ for(i = 0; i < len; i += 2) {
+ bswap16s((uint16_t *)(buf + i));
+ }
+ }
+}
+
+static void ledma_memory_write(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ int l, i;
+ uint16_t tmp_buf[32];
+
+ if (do_bswap) {
+ dma_memory_write(opaque, addr, buf, len);
+ } else {
+ while (len > 0) {
+ l = len;
+ if (l > sizeof(tmp_buf))
+ l = sizeof(tmp_buf);
+ for(i = 0; i < l; i += 2) {
+ tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
+ }
+ dma_memory_write(opaque, addr, (uint8_t *)tmp_buf, l);
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ }
+}
+
static void lance_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
@@ -2047,7 +2088,7 @@
};
void lance_init(NICInfo *nd, target_phys_addr_t leaddr, void *dma_opaque,
- qemu_irq irq)
+ qemu_irq irq, qemu_dma *parent_dma)
{
PCNetState *d;
int lance_io_memory;
@@ -2059,7 +2100,7 @@
lance_io_memory =
cpu_register_io_memory(0, lance_mem_read, lance_mem_write, d);
- d->dma_opaque = dma_opaque;
+ d->dma_opaque = parent_dma;
sparc32_dma_set_reset_data(dma_opaque, pcnet_h_reset, d);
cpu_register_physical_memory(leaddr, 4, lance_io_memory);
* [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-14 19:48 [Qemu-devel] PATCH, RFC: Generic DMA framework Blue Swirl
@ 2007-08-16 18:18 ` Blue Swirl
2007-08-16 19:58 ` malc
2007-08-24 19:40 ` Blue Swirl
1 sibling, 1 reply; 20+ messages in thread
From: Blue Swirl @ 2007-08-16 18:18 UTC (permalink / raw)
To: qemu-devel
On 8/14/07, Blue Swirl <blauwirbel@gmail.com> wrote:
> Would the framework need any changes to support other targets? Comments welcome.
Replying to myself: Yes, changes may be needed. Some of the DMA
controllers move the data outside the CPU loop, but that does not make
much difference.
Background: I want to use the framework for at least the devices that
Sparc32/64 use. For Sparc32 the reason is that on Sun4c (Sparcstation
1, 2, IPX etc.) there is no IOMMU, but instead the CPU MMU is used for
address translation. The DMA framework makes it possible to remove the
IOMMU without changing the devices.
On Sparc64 an IOMMU needs to be inserted between PCI devices and RAM
without disturbing other targets.
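Only the handler registered behind a qemu_dma handle would change
between machines; the devices keep calling dma_memory_rw(). A rough
sketch of the two alternatives (the first handler is from the Sparc32
patch already posted; the second is a sketch of what a Sun4c handler
could look like):

/* Sun4m: DMA eventually hits physical memory; the IOMMU handler
 * installed by iommu_init() translates addresses in front of this. */
static void sparc_dma_memory_rw(void *opaque, target_phys_addr_t addr,
                                uint8_t *buf, int len, int is_write)
{
    cpu_physical_memory_rw(addr, buf, len, is_write);
}

/* Sun4c: no IOMMU, so resolve the DMA address through the CPU MMU
 * instead; opaque would be the CPUState. */
static void sun4c_dma_memory_rw(void *opaque, target_phys_addr_t addr,
                                uint8_t *buf, int len, int is_write)
{
    cpu_memory_rw_debug(opaque, addr, buf, len, is_write);
}

/* Either handler is wrapped with qemu_init_dma() and handed to the same
 * unmodified device models. */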
About the devices: Users of the PC ISA DMA controller (SB16, FDC) pass
the DMA position parameter to the controller. I'm not sure this can be
removed easily. Of course, a real DMA controller does not get any
position data from the target. For Sparc32/64 I would not need to touch
the PC ISA DMA devices, except maybe for the FDC. On Sparc32, the FDC
DMA is not even used. I have to think about this part.
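The interface difference in question looks roughly like this:

/* Today: the device passes its position within the transfer, and the
 * ISA DMA controller computes the guest address from its registers. */
copied = DMA_read_memory(nchan, tmpbuf, dma_pos, to_copy);

/* Generic API: there is no position argument. One option is to pass
 * addr = 0 and let the per-channel handler derive the address from the
 * controller's registers; "isa_dma_channel" is a placeholder name, and
 * the position handling is exactly the open question above. */
dma_memory_read(isa_dma_channel, 0, tmpbuf, to_copy);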
PCI DMA-like devices (eepro100, pcnet, rtl8139, ide) as well as PXA
use cpu_physical_memory_rw to transfer data (eepro100 also uses
ldl_phys, which looks very suspicious). These could be converted to
generic DMA easily.
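The conversion for these would be mostly mechanical: the device keeps a
qemu_dma handle from init time and routes its accesses through it, for
example in the rtl8139 receive path:

/* Before: direct physical memory access from the device model. */
cpu_physical_memory_write(s->RxBuf + s->RxBufAddr, buf, size - wrapped);

/* After: the same access goes through the device's qemu_dma handle, so
 * a host bridge or IOMMU handler can translate it (e.g. on Sparc64). */
dma_memory_write(s->dma, s->RxBuf + s->RxBufAddr, buf, size - wrapped);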
OMAP DMA is strange, but fortunately I'm not interested in those devices.
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-16 18:18 ` [Qemu-devel] " Blue Swirl
@ 2007-08-16 19:58 ` malc
2007-08-19 17:46 ` Blue Swirl
0 siblings, 1 reply; 20+ messages in thread
From: malc @ 2007-08-16 19:58 UTC (permalink / raw)
To: qemu-devel
On Thu, 16 Aug 2007, Blue Swirl wrote:
> On 8/14/07, Blue Swirl <blauwirbel@gmail.com> wrote:
>> Would the framework need any changes to support other targets? Comments welcome.
>
> Replying to myself: Yes, changes may be needed. Some of the DMA
> controllers move the data outside the CPU loop, but that does not make
> much difference.
>
> Background: I want to use the framework for at least the devices that
> Sparc32/64 use. For Sparc32 the reason is that on Sun4c (Sparcstation
> 1, 2, IPX etc.) there is no IOMMU, but instead the CPU MMU is used for
> address translation. The DMA framework makes it possible to remove the
> IOMMU without changing the devices.
>
> On Sparc64 an IOMMU needs to be inserted between PCI devices and RAM
> without disturbing other targets.
>
> About the devices: Users of the PC ISA DMA controller (SB16, FDC) pass
> the DMA position parameter to the controller. I'm not sure this can be
> removed easily. Of course, a real DMA controller does not get any
> position data from the target. For Sparc32/64 I would not need to touch
> the PC ISA DMA devices, except maybe for the FDC. On Sparc32, the FDC
> DMA is not even used. I have to think about this part.
A very long time ago I changed the ISA DMA API to address some of the
critique that Fabrice expressed; I can't remember offhand whether that
included removing the explicit position passing (the patch is on some
off-line HDD, so it's not easy to check whether it did).
http://www.mail-archive.com/qemu-devel@nongnu.org/msg06594.html
If needed, I can try to locate the patch, but the FDC problem still
needs to be addressed by someone.
> PCI DMA-like devices (eepro100, pcnet, rtl8139, ide) as well as PXA
> use cpu_physical_memory_rw to transfer data (eepro100 also uses
> ldl_phys, which looks very suspicious). These could be converted to
> generic DMA easily.
>
> OMAP DMA is strange, but fortunately I'm not interested in those devices.
>
>
--
vale
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-16 19:58 ` malc
@ 2007-08-19 17:46 ` Blue Swirl
0 siblings, 0 replies; 20+ messages in thread
From: Blue Swirl @ 2007-08-19 17:46 UTC (permalink / raw)
To: qemu-devel
On 8/16/07, malc <av1474@comtv.ru> wrote:
> A very long time ago I changed the ISA DMA API to address some of the
> critique that Fabrice expressed; I can't remember offhand whether that
> included removing the explicit position passing (the patch is on some
> off-line HDD, so it's not easy to check whether it did).
>
> http://www.mail-archive.com/qemu-devel@nongnu.org/msg06594.html
>
> If needed, I can try to locate the patch, but the FDC problem still
> needs to be addressed by someone.
That could be interesting; please try to find it. I guess the patch
would be 7_aqemu?
* [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-14 19:48 [Qemu-devel] PATCH, RFC: Generic DMA framework Blue Swirl
2007-08-16 18:18 ` [Qemu-devel] " Blue Swirl
@ 2007-08-24 19:40 ` Blue Swirl
2007-08-24 20:18 ` Paul Brook
1 sibling, 1 reply; 20+ messages in thread
From: Blue Swirl @ 2007-08-24 19:40 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 574 bytes --]
I have now converted the ISA DMA devices (SB16, FDC), most PCI devices
and targets.
gdma.diff: Generic DMA
pc_ppc_dma_to_gdma.diff: Convert x86 and PPC to GDMA
pc_sb16_to_gdma.diff: Convert SB16 to GDMA
pc_fdc_to_gdma.diff: FDC
pc_dma_cleanup.diff: Remove unused functions
sparc_gdma.diff: Convert Sparc32 to GDMA
sparc32_dma_esp_le_to_gdma.diff: Convert ESP and Lance
sun4c.diff: Preliminary Sun4c (Sparcstation-1) support
pci_gdma.diff: Convert PCI devices and targets
Any comments? The patches are a bit intrusive, and I can't test the
targets beyond checking that they compile.
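For orientation, the Sparc32 patches end up building a small chain of
qemu_dma handles, each hop's handler calling into the next one:

/* ESP / Lance --> sparc32 DMA controller (esp_dvma / le_dvma)
 *             --> IOMMU (dvma)
 *             --> physical RAM (physical_dma)
 */
physical_dma = qemu_init_dma(sparc_dma_memory_rw, NULL);
iommu = iommu_init(hwdef->iommu_base, physical_dma, &dvma);
/* sparc32_dma_init() then wraps dvma into esp_dvma and le_dvma, which
 * are handed to esp_init() and lance_init(). */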
[-- Attachment #2: gdma.diff --]
[-- Type: text/x-diff, Size: 1635 bytes --]
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-20 17:57:05.000000000 +0000
+++ qemu/vl.h 2007-08-22 19:29:57.000000000 +0000
@@ -733,6 +733,50 @@
#include "hw/irq.h"
+/* Generic DMA API */
+typedef void (*qemu_dma_handler)(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write);
+
+typedef struct QEMUDMAState {
+ qemu_dma_handler handler;
+ void *opaque;
+} qemu_dma;
+
+static inline void dma_memory_read(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, 0);
+}
+
+static inline void dma_memory_write(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, 1);
+}
+
+static inline void dma_memory_rw(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, is_write);
+}
+
+static inline qemu_dma *qemu_init_dma(qemu_dma_handler handler, void *opaque)
+{
+ qemu_dma *p;
+
+ p = (qemu_dma *)qemu_mallocz(sizeof(qemu_dma));
+ p->handler = handler;
+ p->opaque = opaque;
+
+ return p;
+}
+
/* ISA bus */
extern target_phys_addr_t isa_mem_base;
[-- Attachment #3: pc_ppc_dma_to_gdma.diff --]
[-- Type: text/x-diff, Size: 6955 bytes --]
Index: qemu/hw/pc.c
===================================================================
--- qemu.orig/hw/pc.c 2007-08-22 19:29:57.000000000 +0000
+++ qemu/hw/pc.c 2007-08-22 19:42:13.000000000 +0000
@@ -601,6 +601,12 @@
cpu_reset(env);
}
+static void isa_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
static const int ide_iobase[2] = { 0x1f0, 0x170 };
static const int ide_iobase2[2] = { 0x3f6, 0x376 };
static const int ide_irq[2] = { 14, 15 };
@@ -675,6 +681,7 @@
NICInfo *nd;
qemu_irq *cpu_irq;
qemu_irq *i8259;
+ qemu_dma *physical_dma, **isa_dma;
linux_boot = (kernel_filename != NULL);
@@ -779,7 +786,9 @@
/* map all the bios at the top of memory */
cpu_register_physical_memory((uint32_t)(-bios_size),
bios_size, bios_offset | IO_MEM_ROM);
-
+
+ physical_dma = qemu_init_dma(isa_dma_memory_rw, NULL);
+
bochs_bios_init();
if (linux_boot)
@@ -887,7 +896,7 @@
}
i8042_init(i8259[1], i8259[12], 0x60);
- DMA_init(0);
+ DMA_init(0, physical_dma, &isa_dma);
#ifdef HAS_AUDIO
audio_init(pci_enabled ? pci_bus : NULL, i8259);
#endif
Index: qemu/hw/dma.c
===================================================================
--- qemu.orig/hw/dma.c 2007-08-22 19:29:57.000000000 +0000
+++ qemu/hw/dma.c 2007-08-22 19:44:23.000000000 +0000
@@ -48,6 +48,7 @@
uint8_t eop;
DMA_transfer_handler transfer_handler;
void *opaque;
+ qemu_dma *parent_dma;
};
#define ADDR 0
@@ -380,16 +381,15 @@
r->opaque = opaque;
}
-int DMA_read_memory (int nchan, void *buf, int pos, int len)
+static int dma_read_memory (struct dma_regs *r, void *buf, int pos, int len)
{
- struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
if (r->mode & 0x20) {
int i;
uint8_t *p = buf;
- cpu_physical_memory_read (addr - pos - len, buf, len);
+ dma_memory_read (r->parent_dma, addr - pos - len, buf, len);
/* What about 16bit transfers? */
for (i = 0; i < len >> 1; i++) {
uint8_t b = p[len - i - 1];
@@ -397,21 +397,20 @@
}
}
else
- cpu_physical_memory_read (addr + pos, buf, len);
+ dma_memory_read (r->parent_dma, addr + pos, buf, len);
return len;
}
-int DMA_write_memory (int nchan, void *buf, int pos, int len)
+static int dma_write_memory (struct dma_regs *r, void *buf, int pos, int len)
{
- struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
if (r->mode & 0x20) {
int i;
uint8_t *p = buf;
- cpu_physical_memory_write (addr - pos - len, buf, len);
+ dma_memory_write (r->parent_dma, addr - pos - len, buf, len);
/* What about 16bit transfers? */
for (i = 0; i < len; i++) {
uint8_t b = p[len - i - 1];
@@ -419,11 +418,36 @@
}
}
else
- cpu_physical_memory_write (addr + pos, buf, len);
+ dma_memory_write (r->parent_dma, addr + pos, buf, len);
return len;
}
+static void DMA_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ struct dma_regs *r = (struct dma_regs *)opaque;
+
+ if (is_write)
+ dma_write_memory(r, buf, 0, len);
+ else
+ dma_read_memory(r, buf, 0, len);
+}
+
+int DMA_read_memory (int nchan, void *buf, int pos, int len)
+{
+ struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
+
+ return dma_read_memory(r, buf, pos, len);
+}
+
+int DMA_write_memory (int nchan, void *buf, int pos, int len)
+{
+ struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
+
+ return dma_write_memory(r, buf, pos, len);
+}
+
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
@@ -526,12 +550,22 @@
return 0;
}
-void DMA_init (int high_page_enable)
+void DMA_init (int high_page_enable, qemu_dma *parent_dma, qemu_dma ***isa_dma)
{
+ unsigned int i;
+
dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
high_page_enable ? 0x480 : -1);
dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
high_page_enable ? 0x488 : -1);
+
+ *isa_dma = qemu_mallocz(sizeof(void *) * 8);
+ for (i = 0; i < 8; i++) {
+ *isa_dma[i] = qemu_init_dma(DMA_memory_rw,
+ &dma_controllers[i > 3].regs[i & 3]);
+ dma_controllers[i > 3].regs[i & 3].parent_dma = parent_dma;
+ }
+
register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
}
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-22 19:29:57.000000000 +0000
+++ qemu/vl.h 2007-08-22 19:42:54.000000000 +0000
@@ -1063,7 +1063,7 @@
void DMA_release_DREQ (int nchan);
void DMA_schedule(int nchan);
void DMA_run (void);
-void DMA_init (int high_page_enable);
+void DMA_init (int high_page_enable, qemu_dma *parent_dma, qemu_dma ***isa_dma);
void DMA_register_channel (int nchan,
DMA_transfer_handler transfer_handler,
void *opaque);
Index: qemu/hw/ppc_prep.c
===================================================================
--- qemu.orig/hw/ppc_prep.c 2007-08-22 19:29:57.000000000 +0000
+++ qemu/hw/ppc_prep.c 2007-08-22 19:46:27.000000000 +0000
@@ -510,6 +510,12 @@
#define NVRAM_SIZE 0x2000
+static void ppc_isa_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
/* PowerPC PREP hardware initialisation */
static void ppc_prep_init (int ram_size, int vga_ram_size, int boot_device,
DisplayState *ds, const char **fd_filename,
@@ -528,6 +534,7 @@
ppc_def_t *def;
PCIBus *pci_bus;
qemu_irq *i8259;
+ qemu_dma *physical_dma, **isa_dma;
sysctrl = qemu_mallocz(sizeof(sysctrl_t));
if (sysctrl == NULL)
@@ -598,6 +605,8 @@
initrd_size = 0;
}
+ physical_dma = qemu_init_dma(ppc_isa_dma_memory_rw, NULL);
+
isa_mem_base = 0xc0000000;
if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) {
cpu_abort(env, "Only 6xx bus is supported on PREP machine\n");
@@ -641,7 +650,7 @@
bs_table[2 * i], bs_table[2 * i + 1]);
}
i8042_init(i8259[1], i8259[12], 0x60);
- DMA_init(1);
+ DMA_init(1, physical_dma, &isa_dma);
// AUD_init();
// SB16_init();
[-- Attachment #4: pc_sb16_to_gdma.diff --]
[-- Type: text/x-diff, Size: 3189 bytes --]
Index: qemu/hw/pc.c
===================================================================
--- qemu.orig/hw/pc.c 2007-08-22 19:42:13.000000000 +0000
+++ qemu/hw/pc.c 2007-08-22 19:47:22.000000000 +0000
@@ -623,7 +623,8 @@
static int parallel_irq[MAX_PARALLEL_PORTS] = { 7, 7, 7 };
#ifdef HAS_AUDIO
-static void audio_init (PCIBus *pci_bus, qemu_irq *pic)
+static void audio_init (PCIBus *pci_bus, qemu_irq *pic, qemu_dma *parent_dma,
+ qemu_dma *parent_hdma)
{
struct soundhw *c;
int audio_enabled = 0;
@@ -640,7 +641,7 @@
for (c = soundhw; c->name; ++c) {
if (c->enabled) {
if (c->isa) {
- c->init.init_isa (s, pic);
+ c->init.init_isa (s, pic, parent_dma, parent_hdma);
}
else {
if (pci_bus) {
@@ -898,7 +899,7 @@
i8042_init(i8259[1], i8259[12], 0x60);
DMA_init(0, physical_dma, &isa_dma);
#ifdef HAS_AUDIO
- audio_init(pci_enabled ? pci_bus : NULL, i8259);
+ audio_init(pci_enabled ? pci_bus : NULL, i8259, isa_dma[1], isa_dma[5]);
#endif
floppy_controller = fdctrl_init(i8259[6], 2, 0, 0x3f0, fd_table);
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-22 19:42:54.000000000 +0000
+++ qemu/vl.h 2007-08-22 19:47:22.000000000 +0000
@@ -936,7 +936,8 @@
int enabled;
int isa;
union {
- int (*init_isa) (AudioState *s, qemu_irq *pic);
+ int (*init_isa) (AudioState *s, qemu_irq *pic, qemu_dma *parent_dma,
+ qemu_dma *parent_hdma);
int (*init_pci) (PCIBus *bus, AudioState *s);
} init;
};
@@ -1046,7 +1047,8 @@
int es1370_init (PCIBus *bus, AudioState *s);
/* sb16.c */
-int SB16_init (AudioState *s, qemu_irq *pic);
+int SB16_init (AudioState *audio, qemu_irq *pic, qemu_dma *parent_dma,
+ qemu_dma *parent_hdma);
/* adlib.c */
int Adlib_init (AudioState *s, qemu_irq *pic);
Index: qemu/hw/sb16.c
===================================================================
--- qemu.orig/hw/sb16.c 2007-08-22 19:41:43.000000000 +0000
+++ qemu/hw/sb16.c 2007-08-22 19:47:22.000000000 +0000
@@ -57,7 +57,9 @@
qemu_irq *pic;
int irq;
int dma;
+ qemu_dma *parent_dma;
int hdma;
+ qemu_dma *parent_hdma;
int port;
int ver;
@@ -1169,8 +1171,12 @@
to_copy = sizeof (tmpbuf);
}
- copied = DMA_read_memory (nchan, tmpbuf, dma_pos, to_copy);
- copied = AUD_write (s->voice, tmpbuf, copied);
+ if (s->use_hdma)
+ dma_memory_read(s->parent_hdma, 0, tmpbuf + dma_pos, to_copy);
+ else
+ dma_memory_read(s->parent_dma, 0, tmpbuf + dma_pos, to_copy);
+
+ copied = AUD_write (s->voice, tmpbuf, to_copy);
temp -= copied;
dma_pos = (dma_pos + copied) % dma_len;
@@ -1390,7 +1396,8 @@
return 0;
}
-int SB16_init (AudioState *audio, qemu_irq *pic)
+int SB16_init (AudioState *audio, qemu_irq *pic, qemu_dma *parent_dma,
+ qemu_dma *parent_hdma)
{
SB16State *s;
int i;
[-- Attachment #5: pc_fdc_to_gdma.diff --]
[-- Type: text/x-diff, Size: 3755 bytes --]
Index: qemu/hw/fdc.c
===================================================================
--- qemu.orig/hw/fdc.c 2007-08-22 19:41:40.000000000 +0000
+++ qemu/hw/fdc.c 2007-08-22 19:47:59.000000000 +0000
@@ -370,6 +370,7 @@
/* HW */
qemu_irq irq;
int dma_chann;
+ qemu_dma *parent_dma;
target_phys_addr_t io_base;
/* Controller state */
QEMUTimer *result_timer;
@@ -580,7 +581,8 @@
fdctrl_t *fdctrl_init (qemu_irq irq, int dma_chann, int mem_mapped,
target_phys_addr_t io_base,
- BlockDriverState **fds)
+ BlockDriverState **fds,
+ qemu_dma *parent_dma)
{
fdctrl_t *fdctrl;
int io_mem;
@@ -598,6 +600,7 @@
fdctrl->dma_chann = dma_chann;
fdctrl->io_base = io_base;
fdctrl->config = 0x60; /* Implicit seek, polling & FIFO enabled */
+ fdctrl->parent_dma = parent_dma;
if (fdctrl->dma_chann != -1) {
fdctrl->dma_en = 1;
DMA_register_channel(dma_chann, &fdctrl_transfer_handler, fdctrl);
@@ -1087,17 +1090,13 @@
switch (fdctrl->data_dir) {
case FD_DIR_READ:
/* READ commands */
- DMA_write_memory (nchan, fdctrl->fifo + rel_pos,
- fdctrl->data_pos, len);
-/* cpu_physical_memory_write(addr + fdctrl->data_pos, */
-/* fdctrl->fifo + rel_pos, len); */
+ dma_memory_write(fdctrl->parent_dma, 0, fdctrl->fifo + rel_pos +
+ fdctrl->data_pos, len);
break;
case FD_DIR_WRITE:
/* WRITE commands */
- DMA_read_memory (nchan, fdctrl->fifo + rel_pos,
- fdctrl->data_pos, len);
-/* cpu_physical_memory_read(addr + fdctrl->data_pos, */
-/* fdctrl->fifo + rel_pos, len); */
+ dma_memory_read(fdctrl->parent_dma, 0, fdctrl->fifo + rel_pos +
+ fdctrl->data_pos, len);
if (bdrv_write(cur_drv->bs, fd_sector(cur_drv),
fdctrl->fifo, 1) < 0) {
FLOPPY_ERROR("writting sector %d\n", fd_sector(cur_drv));
@@ -1110,9 +1109,8 @@
{
uint8_t tmpbuf[FD_SECTOR_LEN];
int ret;
- DMA_read_memory (nchan, tmpbuf, fdctrl->data_pos, len);
-/* cpu_physical_memory_read(addr + fdctrl->data_pos, */
-/* tmpbuf, len); */
+ dma_memory_read(fdctrl->parent_dma, 0, tmpbuf +
+ fdctrl->data_pos, len);
ret = memcmp(tmpbuf, fdctrl->fifo + rel_pos, len);
if (ret == 0) {
status2 = 0x08;
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-22 19:47:22.000000000 +0000
+++ qemu/vl.h 2007-08-22 19:47:59.000000000 +0000
@@ -1077,7 +1077,8 @@
fdctrl_t *fdctrl_init (qemu_irq irq, int dma_chann, int mem_mapped,
target_phys_addr_t io_base,
- BlockDriverState **fds);
+ BlockDriverState **fds,
+ qemu_dma *parent_dma);
int fdctrl_get_drive_type(fdctrl_t *fdctrl, int drive_num);
/* eepro100.c */
Index: qemu/hw/pc.c
===================================================================
--- qemu.orig/hw/pc.c 2007-08-22 19:47:22.000000000 +0000
+++ qemu/hw/pc.c 2007-08-22 19:47:59.000000000 +0000
@@ -902,7 +902,7 @@
audio_init(pci_enabled ? pci_bus : NULL, i8259, isa_dma[1], isa_dma[5]);
#endif
- floppy_controller = fdctrl_init(i8259[6], 2, 0, 0x3f0, fd_table);
+ floppy_controller = fdctrl_init(i8259[6], 2, 0, 0x3f0, fd_table, isa_dma[2]);
cmos_init(ram_size, boot_device, bs_table);
[-- Attachment #6: pc_dma_cleanup.diff --]
[-- Type: text/x-diff, Size: 1329 bytes --]
Index: qemu/hw/dma.c
===================================================================
--- qemu.orig/hw/dma.c 2007-08-22 19:44:23.000000000 +0000
+++ qemu/hw/dma.c 2007-08-22 19:51:31.000000000 +0000
@@ -434,20 +434,6 @@
dma_read_memory(r, buf, 0, len);
}
-int DMA_read_memory (int nchan, void *buf, int pos, int len)
-{
- struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
-
- return dma_read_memory(r, buf, pos, len);
-}
-
-int DMA_write_memory (int nchan, void *buf, int pos, int len)
-{
- struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
-
- return dma_write_memory(r, buf, pos, len);
-}
-
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-22 19:47:59.000000000 +0000
+++ qemu/vl.h 2007-08-22 19:51:31.000000000 +0000
@@ -1059,8 +1059,6 @@
/* dma.c */
typedef int (*DMA_transfer_handler) (void *opaque, int nchan, int pos, int size);
int DMA_get_channel_mode (int nchan);
-int DMA_read_memory (int nchan, void *buf, int pos, int size);
-int DMA_write_memory (int nchan, void *buf, int pos, int size);
void DMA_hold_DREQ (int nchan);
void DMA_release_DREQ (int nchan);
void DMA_schedule(int nchan);
[-- Attachment #7: sparc_gdma.diff --]
[-- Type: text/x-diff, Size: 5932 bytes --]
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c 2007-08-20 20:11:31.000000000 +0000
+++ qemu/hw/sun4m.c 2007-08-22 20:03:28.000000000 +0000
@@ -82,19 +82,10 @@
{
return 0;
}
-int DMA_read_memory (int nchan, void *buf, int pos, int size)
-{
- return 0;
-}
-int DMA_write_memory (int nchan, void *buf, int pos, int size)
-{
- return 0;
-}
void DMA_hold_DREQ (int nchan) {}
void DMA_release_DREQ (int nchan) {}
void DMA_schedule(int nchan) {}
void DMA_run (void) {}
-void DMA_init (int high_page_enable) {}
void DMA_register_channel (int nchan,
DMA_transfer_handler transfer_handler,
void *opaque)
@@ -289,6 +280,17 @@
slavio_set_power_fail(slavio_misc, 1);
}
+static void sparc_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
+static void sparc_dummy_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+}
+
static void main_cpu_reset(void *opaque)
{
CPUState *env = opaque;
@@ -316,6 +318,7 @@
qemu_irq *cpu_irqs[MAX_CPUS], *slavio_irq, *slavio_cpu_irq,
*espdma_irq, *ledma_irq;
qemu_irq *esp_reset, *le_reset;
+ qemu_dma *physical_dma, *dummy_dma, *dvma;
/* init CPUs */
sparc_find_by_name(cpu_model, &def);
@@ -344,7 +347,11 @@
/* allocate RAM */
cpu_register_physical_memory(0, RAM_size, 0);
- iommu = iommu_init(hwdef->iommu_base);
+ physical_dma = qemu_init_dma(sparc_dma_memory_rw, NULL);
+ dummy_dma = qemu_init_dma(sparc_dummy_memory_rw, NULL);
+
+ iommu = iommu_init(hwdef->iommu_base, physical_dma, &dvma);
+
slavio_intctl = slavio_intctl_init(hwdef->intctl_base,
hwdef->intctl_base + 0x10000ULL,
&hwdef->intbit_to_level[0],
@@ -391,7 +398,8 @@
// Slavio TTYB (base+0, Linux ttyS1) is the second Qemu serial device
slavio_serial_init(hwdef->serial_base, slavio_irq[hwdef->ser_irq],
serial_hds[1], serial_hds[0]);
- fdctrl_init(slavio_irq[hwdef->fd_irq], 0, 1, hwdef->fd_base, fd_table);
+ fdctrl_init(slavio_irq[hwdef->fd_irq], 0, 1, hwdef->fd_base, fd_table,
+ dummy_dma);
main_esp = esp_init(bs_table, hwdef->esp_base, espdma, *espdma_irq,
esp_reset);
Index: qemu/hw/iommu.c
===================================================================
--- qemu.orig/hw/iommu.c 2007-08-20 20:11:31.000000000 +0000
+++ qemu/hw/iommu.c 2007-08-22 19:53:40.000000000 +0000
@@ -104,6 +104,7 @@
target_phys_addr_t addr;
uint32_t regs[IOMMU_NREGS];
target_phys_addr_t iostart;
+ qemu_dma *parent_dma;
} IOMMUState;
static uint32_t iommu_mem_readw(void *opaque, target_phys_addr_t addr)
@@ -202,7 +203,8 @@
static uint32_t iommu_page_get_flags(IOMMUState *s, target_phys_addr_t addr)
{
- uint32_t iopte, ret;
+ uint32_t ret;
+ target_phys_addr_t iopte;
#ifdef DEBUG_IOMMU
target_phys_addr_t pa = addr;
#endif
@@ -210,7 +212,8 @@
iopte = s->regs[IOMMU_BASE] << 4;
addr &= ~s->iostart;
iopte += (addr >> (PAGE_SHIFT - 2)) & ~3;
- ret = ldl_phys(iopte);
+ dma_memory_read(s->parent_dma, iopte, (uint8_t *)&ret, 4);
+ bswap32s(&ret);
DPRINTF("get flags addr " TARGET_FMT_plx " => pte %x, *ptes = %x\n", pa,
iopte, ret);
@@ -245,6 +248,7 @@
void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int is_write)
{
+ IOMMUState *s = opaque;
int l;
uint32_t flags;
target_phys_addr_t page, phys_addr;
@@ -259,16 +263,12 @@
iommu_bad_addr(opaque, page, is_write);
return;
}
- phys_addr = iommu_translate_pa(opaque, addr, flags);
- if (is_write) {
- if (!(flags & IOPTE_WRITE)) {
- iommu_bad_addr(opaque, page, is_write);
- return;
- }
- cpu_physical_memory_write(phys_addr, buf, len);
- } else {
- cpu_physical_memory_read(phys_addr, buf, len);
+ if (is_write && !(flags & IOPTE_WRITE)) {
+ iommu_bad_addr(opaque, page, is_write);
+ return;
}
+ phys_addr = iommu_translate_pa(opaque, addr, flags);
+ dma_memory_rw(s->parent_dma, phys_addr, buf, len, is_write);
len -= l;
buf += l;
addr += l;
@@ -309,7 +309,8 @@
s->regs[IOMMU_CTRL] = IOMMU_VERSION;
}
-void *iommu_init(target_phys_addr_t addr)
+void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
+ qemu_dma **dvma)
{
IOMMUState *s;
int iommu_io_memory;
@@ -322,7 +323,10 @@
iommu_io_memory = cpu_register_io_memory(0, iommu_mem_read, iommu_mem_write, s);
cpu_register_physical_memory(addr, IOMMU_NREGS * 4, iommu_io_memory);
-
+
+ s->parent_dma = parent_dma;
+ *dvma = qemu_init_dma(sparc_iommu_memory_rw, s);
+
register_savevm("iommu", addr, 2, iommu_save, iommu_load, s);
qemu_register_reset(iommu_reset, s);
return s;
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-22 19:51:31.000000000 +0000
+++ qemu/vl.h 2007-08-22 20:03:09.000000000 +0000
@@ -1263,7 +1263,8 @@
extern QEMUMachine ss5_machine, ss10_machine;
/* iommu.c */
-void *iommu_init(target_phys_addr_t addr);
+void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
+ qemu_dma **dvma);
void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int is_write);
static inline void sparc_iommu_memory_read(void *opaque,
[-- Attachment #8: sun4c.diff --]
[-- Type: text/x-diff, Size: 17074 bytes --]
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c 2007-08-22 20:03:47.000000000 +0000
+++ qemu/hw/sun4m.c 2007-08-23 17:30:22.000000000 +0000
@@ -1,7 +1,7 @@
/*
- * QEMU Sun4m System Emulator
+ * QEMU Sun4m/Sun4c System Emulator
*
- * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2003-2005, 2007 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -36,6 +36,13 @@
* SPARCstation 20/xx, SPARCserver 20
* SPARCstation 4
*
+ * Sun4c architecture was used in the following machines:
+ * SPARCstation 1/1+, SPARCserver 1/1+
+ * SPARCstation SLC
+ * SPARCstation IPC
+ * SPARCstation ELC
+ * SPARCstation IPX
+ *
* See for example: http://www.sunhelp.org/faq/sunref1.html
*/
@@ -62,6 +69,7 @@
target_phys_addr_t serial_base, fd_base;
target_phys_addr_t dma_base, esp_base, le_base;
target_phys_addr_t tcx_base, cs_base, power_base;
+ target_phys_addr_t sun4c_intctl_base, sun4c_counter_base;
long vram_size, nvram_size;
// IRQ numbers are not PIL ones, but master interrupt controller register
// bit numbers
@@ -286,6 +294,12 @@
cpu_physical_memory_rw(addr, buf, len, is_write);
}
+static void sun4c_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_memory_rw_debug(opaque, addr, buf, len, is_write);
+}
+
static void sparc_dummy_memory_rw(void *opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int is_write)
{
@@ -418,6 +432,93 @@
return nvram;
}
+static void *sun4c_hw_init(const struct hwdef *hwdef, int RAM_size,
+ DisplayState *ds, const char *cpu_model)
+
+{
+ CPUState *env;
+ unsigned int i;
+ void *espdma, *ledma, *main_esp, *nvram;
+ const sparc_def_t *def;
+ qemu_irq *cpu_irqs, *slavio_irq, *espdma_irq, *ledma_irq;
+ qemu_irq *esp_reset, *le_reset;
+ qemu_dma *physical_dma, *dummy_dma, *mmu_dma, *esp_dvma, *le_dvma;
+
+ /* init CPU */
+ sparc_find_by_name(cpu_model, &def);
+ if (def == NULL) {
+ fprintf(stderr, "Unable to find Sparc CPU definition\n");
+ exit(1);
+ }
+
+ env = cpu_init();
+ cpu_sparc_register(env, def);
+ qemu_register_reset(main_cpu_reset, env);
+ register_savevm("cpu", 0, 3, cpu_save, cpu_load, env);
+ cpu_irqs = qemu_allocate_irqs(cpu_set_irq, env, MAX_PILS);
+
+ /* allocate RAM */
+ cpu_register_physical_memory(0, RAM_size, 0);
+
+ physical_dma = qemu_init_dma(sparc_dma_memory_rw, NULL);
+ dummy_dma = qemu_init_dma(sparc_dummy_memory_rw, NULL);
+ mmu_dma = qemu_init_dma(sun4c_dma_memory_rw, env);
+
+ slavio_intctl = sun4c_intctl_init(hwdef->sun4c_intctl_base,
+ &slavio_irq, cpu_irqs);
+
+ espdma = sparc32_dma_init(hwdef->dma_base, slavio_irq[hwdef->esp_irq],
+ &espdma_irq, mmu_dma, &esp_dvma, 1, &esp_reset);
+
+ ledma = sparc32_dma_init(hwdef->dma_base + 16ULL,
+ slavio_irq[hwdef->le_irq], &ledma_irq, mmu_dma,
+ &le_dvma, 0, &le_reset);
+
+
+ if (graphic_depth != 8 && graphic_depth != 24) {
+ fprintf(stderr, "qemu: Unsupported depth: %d\n", graphic_depth);
+ exit (1);
+ }
+ tcx_init(ds, hwdef->tcx_base, phys_ram_base + RAM_size, RAM_size,
+ hwdef->vram_size, graphic_width, graphic_height, graphic_depth);
+
+ if (nd_table[0].model == NULL
+ || strcmp(nd_table[0].model, "lance") == 0) {
+ lance_init(&nd_table[0], hwdef->le_base, *ledma_irq, le_dvma, le_reset);
+ } else if (strcmp(nd_table[0].model, "?") == 0) {
+ fprintf(stderr, "qemu: Supported NICs: lance\n");
+ exit (1);
+ } else {
+ fprintf(stderr, "qemu: Unsupported NIC: %s\n", nd_table[0].model);
+ exit (1);
+ }
+
+ nvram = m48t59_init(slavio_irq[0], hwdef->nvram_base, 0,
+ hwdef->nvram_size, 8);
+
+ slavio_timer_init(hwdef->sun4c_counter_base,
+ slavio_irq[hwdef->clock_irq], 2);
+
+ slavio_serial_ms_kbd_init(hwdef->ms_kb_base, slavio_irq[hwdef->ms_kb_irq]);
+ // Slavio TTYA (base+4, Linux ttyS0) is the first Qemu serial device
+ // Slavio TTYB (base+0, Linux ttyS1) is the second Qemu serial device
+ slavio_serial_init(hwdef->serial_base, slavio_irq[hwdef->ser_irq],
+ serial_hds[1], serial_hds[0]);
+ fdctrl_init(slavio_irq[hwdef->fd_irq], 0, 1, hwdef->fd_base, fd_table,
+ dummy_dma);
+
+ main_esp = esp_init(bs_table, hwdef->esp_base, *espdma_irq, esp_dvma,
+ esp_reset);
+
+ for (i = 0; i < MAX_DISKS; i++) {
+ if (bs_table[i]) {
+ esp_scsi_attach(main_esp, bs_table[i], i);
+ }
+ }
+
+ return nvram;
+}
+
static void sun4m_load_kernel(long vram_size, int RAM_size, int boot_device,
const char *kernel_filename,
const char *kernel_cmdline,
@@ -501,6 +602,8 @@
.esp_base = 0x78800000,
.le_base = 0x78c00000,
.power_base = 0x7a000000,
+ .sun4c_intctl_base = -1,
+ .sun4c_counter_base = -1,
.vram_size = 0x00100000,
.nvram_size = 0x2000,
.esp_irq = 18,
@@ -534,6 +637,8 @@
.esp_base = 0xef0800000ULL,
.le_base = 0xef0c00000ULL,
.power_base = 0xefa000000ULL,
+ .sun4c_intctl_base = -1,
+ .sun4c_counter_base = -1,
.vram_size = 0x00100000,
.nvram_size = 0x2000,
.esp_irq = 18,
@@ -551,6 +656,37 @@
6, 0, 4, 10, 8, 0, 11, 0, 0, 0, 0, 0, 15, 0, 15, 0,
},
},
+ /* SS-2 */
+ {
+ .iommu_base = 0xf8000000,
+ .tcx_base = 0xfe000000,
+ .cs_base = -1,
+ .slavio_base = 0xf0000000,
+ .ms_kb_base = 0xf0000000,
+ .serial_base = 0xf1000000,
+ .nvram_base = 0xf2000000,
+ .fd_base = 0xf7200000,
+ .counter_base = -1,
+ .intctl_base = -1,
+ .dma_base = 0xf8400000,
+ .esp_base = 0xf8800000,
+ .le_base = 0xf8c00000,
+ .power_base = -1,
+ .sun4c_intctl_base = 0xf5000000,
+ .sun4c_counter_base = 0xf3000000,
+ .vram_size = 0x00100000,
+ .nvram_size = 0x2000, // XXX 0x800,
+ .esp_irq = 2,
+ .le_irq = 3,
+ .clock_irq = 5,
+ .clock1_irq = 7,
+ .ms_kb_irq = 1,
+ .ser_irq = 1,
+ .fd_irq = 1,
+ .me_irq = 1,
+ .cs_irq = -1,
+ .machine_id = 0x55,
+ },
};
static void sun4m_common_init(int RAM_size, int boot_device, DisplayState *ds,
@@ -599,6 +735,32 @@
1, PROM_ADDR); // XXX prom overlap, actually first 4GB ok
}
+/* SPARCstation 2 hardware initialisation */
+static void ss2_init(int RAM_size, int vga_ram_size, int boot_device,
+ DisplayState *ds, const char **fd_filename, int snapshot,
+ const char *kernel_filename, const char *kernel_cmdline,
+ const char *initrd_filename, const char *cpu_model)
+{
+ void *nvram;
+ const unsigned int max_ram = 0x10000000;
+ const int machine = 2;
+
+ if (cpu_model == NULL)
+ cpu_model = "Cypress CY7C601";
+
+ if ((unsigned int)RAM_size > max_ram) {
+ fprintf(stderr, "qemu: Too much memory for this machine: %d, maximum %d\n",
+ (unsigned int)RAM_size / (1024 * 1024),
+ (unsigned int)max_ram / (1024 * 1024));
+ exit(1);
+ }
+ nvram = sun4c_hw_init(&hwdefs[machine], RAM_size, ds, cpu_model);
+
+ sun4m_load_kernel(hwdefs[machine].vram_size, RAM_size, boot_device,
+ kernel_filename, kernel_cmdline, initrd_filename,
+ hwdefs[machine].machine_id, nvram);
+}
+
QEMUMachine ss5_machine = {
"SS-5",
"Sun4m platform, SPARCstation 5",
@@ -610,3 +772,9 @@
"Sun4m platform, SPARCstation 10",
ss10_init,
};
+
+QEMUMachine ss2_machine = {
+ "SS-2",
+ "Sun4c platform, SPARCstation 2",
+ ss2_init,
+};
Index: qemu/target-sparc/translate.c
===================================================================
--- qemu.orig/target-sparc/translate.c 2007-08-22 19:56:37.000000000 +0000
+++ qemu/target-sparc/translate.c 2007-08-22 20:03:55.000000000 +0000
@@ -3456,6 +3456,12 @@
},
#else
{
+ .name = "Cypress CY7C601",
+ .iu_version = 0x11 << 24, /* Impl 1, ver 1 */
+ .fpu_version = 3 << 17, /* FPU version 3 (Weitek WTL3170/2) */
+ .mmu_version = 0,
+ },
+ {
.name = "Fujitsu MB86904",
.iu_version = 0x04 << 24, /* Impl 0, ver 4 */
.fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c 2007-08-22 19:56:37.000000000 +0000
+++ qemu/vl.c 2007-08-22 20:03:55.000000000 +0000
@@ -7127,6 +7127,7 @@
#else
qemu_register_machine(&ss5_machine);
qemu_register_machine(&ss10_machine);
+ qemu_register_machine(&ss2_machine);
#endif
#elif defined(TARGET_ARM)
qemu_register_machine(&integratorcp_machine);
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-22 20:03:47.000000000 +0000
+++ qemu/vl.h 2007-08-22 20:03:55.000000000 +0000
@@ -1260,7 +1260,7 @@
void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val);
/* sun4m.c */
-extern QEMUMachine ss5_machine, ss10_machine;
+extern QEMUMachine ss5_machine, ss10_machine, ss2_machine;
/* iommu.c */
void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
@@ -1279,6 +1279,10 @@
void slavio_pic_info(void *opaque);
void slavio_irq_info(void *opaque);
+/* sun4c_intctl.c */
+void *sun4c_intctl_init(target_phys_addr_t addr, qemu_irq **irq,
+ qemu_irq *parent_irq);
+
/* loader.c */
int get_image_size(const char *filename);
int load_image(const char *filename, uint8_t *addr);
Index: qemu/hw/sun4c_intctl.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ qemu/hw/sun4c_intctl.c 2007-08-22 20:03:55.000000000 +0000
@@ -0,0 +1,221 @@
+/*
+ * QEMU Sparc Sun4c interrupt controller emulation
+ *
+ * Copyright (c) 2007 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "vl.h"
+//#define DEBUG_IRQ_COUNT
+//#define DEBUG_IRQ
+
+#ifdef DEBUG_IRQ
+#define DPRINTF(fmt, args...) \
+do { printf("IRQ: " fmt , ##args); } while (0)
+#else
+#define DPRINTF(fmt, args...)
+#endif
+
+/*
+ * Registers of interrupt controller in sun4c.
+ *
+ */
+
+#define MAX_PILS 16
+
+typedef struct Sun4c_INTCTLState {
+#ifdef DEBUG_IRQ_COUNT
+ uint64_t irq_count;
+#endif
+ qemu_irq *cpu_irqs;
+ const uint32_t *intbit_to_level;
+ uint32_t pil_out;
+ uint8_t reg;
+ uint8_t pending;
+} Sun4c_INTCTLState;
+
+#define INTCTL_MAXADDR 0
+#define INTCTL_SIZE (INTCTL_MAXADDR + 1)
+
+static void sun4c_check_interrupts(void *opaque);
+
+static uint32_t sun4c_intctl_mem_readb(void *opaque, target_phys_addr_t addr)
+{
+ Sun4c_INTCTLState *s = opaque;
+ uint32_t ret;
+
+ ret = s->reg;
+ DPRINTF("read reg 0x" TARGET_FMT_plx " = %x\n", addr, ret);
+
+ return ret;
+}
+
+static void sun4c_intctl_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+ Sun4c_INTCTLState *s = opaque;
+
+ DPRINTF("write reg 0x" TARGET_FMT_plx " = %x\n", addr, val);
+ val &= 0xbf;
+ s->reg = val;
+ sun4c_check_interrupts(s);
+}
+
+static CPUReadMemoryFunc *sun4c_intctl_mem_read[3] = {
+ sun4c_intctl_mem_readb,
+ sun4c_intctl_mem_readb,
+ sun4c_intctl_mem_readb,
+};
+
+static CPUWriteMemoryFunc *sun4c_intctl_mem_write[3] = {
+ sun4c_intctl_mem_writeb,
+ sun4c_intctl_mem_writeb,
+ sun4c_intctl_mem_writeb,
+};
+
+void sun4c_pic_info(void *opaque)
+{
+ Sun4c_INTCTLState *s = opaque;
+
+ term_printf("master: pending 0x%2.2x, enabled 0x%2.2x\n", s->pending, s->reg);
+}
+
+void sun4c_irq_info(void *opaque)
+{
+#ifndef DEBUG_IRQ_COUNT
+ term_printf("irq statistic code not compiled.\n");
+#else
+ Sun4c_INTCTLState *s = opaque;
+ int64_t count;
+
+ term_printf("IRQ statistics:\n");
+ count = s->irq_count[i];
+ if (count > 0)
+ term_printf("%2d: %" PRId64 "\n", i, count);
+#endif
+}
+
+static const uint32_t intbit_to_level[] = { 0, 1, 4, 6, 8, 10, 0, 14, };
+
+static void sun4c_check_interrupts(void *opaque)
+{
+ Sun4c_INTCTLState *s = opaque;
+ uint32_t pil_pending;
+ unsigned int i;
+
+ DPRINTF("pending %x disabled %x\n", pending, s->intregm_disabled);
+ pil_pending = 0;
+ if (s->pending && !(s->reg & 0x80000000)) {
+ for (i = 0; i < 8; i++) {
+ if (s->pending & (1 << i))
+ pil_pending |= 1 << intbit_to_level[i];
+ }
+ }
+
+ for (i = 0; i < MAX_PILS; i++) {
+ if (pil_pending & (1 << i)) {
+ if (!(s->pil_out & (1 << i)))
+ qemu_irq_raise(s->cpu_irqs[i]);
+ } else {
+ if (s->pil_out & (1 << i))
+ qemu_irq_lower(s->cpu_irqs[i]);
+ }
+ }
+ s->pil_out = pil_pending;
+}
+
+/*
+ * "irq" here is the bit number in the system interrupt register
+ */
+static void sun4c_set_irq(void *opaque, int irq, int level)
+{
+ Sun4c_INTCTLState *s = opaque;
+ uint32_t mask = 1 << irq;
+ uint32_t pil = intbit_to_level[irq];
+
+ DPRINTF("Set irq %d -> pil %d level %d\n", irq, pil,
+ level);
+ if (pil > 0) {
+ if (level) {
+#ifdef DEBUG_IRQ_COUNT
+ s->irq_count[pil]++;
+#endif
+ s->pending |= mask;
+ } else {
+ s->pending &= ~mask;
+ }
+ sun4c_check_interrupts(s);
+ }
+}
+
+static void sun4c_intctl_save(QEMUFile *f, void *opaque)
+{
+ Sun4c_INTCTLState *s = opaque;
+
+ qemu_put_8s(f, &s->reg);
+ qemu_put_8s(f, &s->pending);
+}
+
+static int sun4c_intctl_load(QEMUFile *f, void *opaque, int version_id)
+{
+ Sun4c_INTCTLState *s = opaque;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ qemu_get_8s(f, &s->reg);
+ qemu_get_8s(f, &s->pending);
+ sun4c_check_interrupts(s);
+
+ return 0;
+}
+
+static void sun4c_intctl_reset(void *opaque)
+{
+ Sun4c_INTCTLState *s = opaque;
+
+ s->reg = 1;
+ s->pending = 0;
+ sun4c_check_interrupts(s);
+}
+
+void *sun4c_intctl_init(target_phys_addr_t addr, qemu_irq **irq,
+ qemu_irq *parent_irq)
+{
+ int sun4c_intctl_io_memory;
+ Sun4c_INTCTLState *s;
+
+ s = qemu_mallocz(sizeof(Sun4c_INTCTLState));
+ if (!s)
+ return NULL;
+
+ sun4c_intctl_io_memory = cpu_register_io_memory(0, sun4c_intctl_mem_read,
+ sun4c_intctl_mem_write, s);
+ cpu_register_physical_memory(addr, INTCTL_SIZE, sun4c_intctl_io_memory);
+ s->cpu_irqs = parent_irq;
+
+ register_savevm("sun4c_intctl", addr, 1, sun4c_intctl_save,
+ sun4c_intctl_load, s);
+
+ qemu_register_reset(sun4c_intctl_reset, s);
+ *irq = qemu_allocate_irqs(sun4c_set_irq, s, 8);
+
+ sun4c_intctl_reset(s);
+ return s;
+}
+
Index: qemu/Makefile.target
===================================================================
--- qemu.orig/Makefile.target 2007-08-22 19:56:37.000000000 +0000
+++ qemu/Makefile.target 2007-08-22 20:03:55.000000000 +0000
@@ -454,7 +454,7 @@
else
VL_OBJS+= sun4m.o tcx.o pcnet.o iommu.o m48t59.o slavio_intctl.o
VL_OBJS+= slavio_timer.o slavio_serial.o slavio_misc.o fdc.o esp.o sparc32_dma.o
-VL_OBJS+= cs4231.o ptimer.o
+VL_OBJS+= cs4231.o ptimer.o sun4c_intctl.o
endif
endif
ifeq ($(TARGET_BASE_ARCH), arm)
[-- Attachment #9: pci_gdma.diff --]
[-- Type: text/x-diff, Size: 39367 bytes --]
Index: qemu/hw/pci.c
===================================================================
--- qemu.orig/hw/pci.c 2007-08-23 17:52:49.000000000 +0000
+++ qemu/hw/pci.c 2007-08-23 18:25:35.000000000 +0000
@@ -574,20 +574,20 @@
}
/* Initialize a PCI NIC. */
-void pci_nic_init(PCIBus *bus, NICInfo *nd, int devfn)
+void pci_nic_init(PCIBus *bus, NICInfo *nd, int devfn, qemu_dma *parent_dma)
{
if (strcmp(nd->model, "ne2k_pci") == 0) {
pci_ne2000_init(bus, nd, devfn);
} else if (strcmp(nd->model, "i82551") == 0) {
- pci_i82551_init(bus, nd, devfn);
+ pci_i82551_init(bus, nd, devfn, parent_dma);
} else if (strcmp(nd->model, "i82557b") == 0) {
- pci_i82557b_init(bus, nd, devfn);
+ pci_i82557b_init(bus, nd, devfn, parent_dma);
} else if (strcmp(nd->model, "i82559er") == 0) {
- pci_i82559er_init(bus, nd, devfn);
+ pci_i82559er_init(bus, nd, devfn, parent_dma);
} else if (strcmp(nd->model, "rtl8139") == 0) {
- pci_rtl8139_init(bus, nd, devfn);
+ pci_rtl8139_init(bus, nd, devfn, parent_dma);
} else if (strcmp(nd->model, "pcnet") == 0) {
- pci_pcnet_init(bus, nd, devfn);
+ pci_pcnet_init(bus, nd, devfn, parent_dma);
} else if (strcmp(nd->model, "?") == 0) {
fprintf(stderr, "qemu: Supported PCI NICs: i82551 i82557b i82559er"
" ne2k_pci pcnet rtl8139\n");
Index: qemu/hw/rtl8139.c
===================================================================
--- qemu.orig/hw/rtl8139.c 2007-08-23 17:44:32.000000000 +0000
+++ qemu/hw/rtl8139.c 2007-08-23 17:56:22.000000000 +0000
@@ -410,9 +410,6 @@
/* Clears all tally counters */
static void RTL8139TallyCounters_clear(RTL8139TallyCounters* counters);
-/* Writes tally counters to specified physical memory address */
-static void RTL8139TallyCounters_physical_memory_write(target_phys_addr_t tc_addr, RTL8139TallyCounters* counters);
-
/* Loads values of tally counters from VM state file */
static void RTL8139TallyCounters_load(QEMUFile* f, RTL8139TallyCounters *tally_counters);
@@ -492,8 +489,13 @@
/* PCI interrupt timer */
QEMUTimer *timer;
+ /* Memory access */
+ qemu_dma *dma;
} RTL8139State;
+/* Writes tally counters to specified physical memory address */
+static void RTL8139TallyCounters_physical_memory_write(RTL8139State *s, target_phys_addr_t tc_addr, RTL8139TallyCounters* tally_counters);
+
void prom9346_decode_command(EEprom9346 *eeprom, uint8_t command)
{
DEBUG_PRINT(("RTL8139: eeprom command 0x%02x\n", command));
@@ -752,14 +754,14 @@
if (size > wrapped)
{
- cpu_physical_memory_write( s->RxBuf + s->RxBufAddr,
+ dma_memory_write( s->dma, s->RxBuf + s->RxBufAddr,
buf, size-wrapped );
}
/* reset buffer pointer */
s->RxBufAddr = 0;
- cpu_physical_memory_write( s->RxBuf + s->RxBufAddr,
+ dma_memory_write( s->dma, s->RxBuf + s->RxBufAddr,
buf + (size-wrapped), wrapped );
s->RxBufAddr = wrapped;
@@ -769,7 +771,7 @@
}
/* non-wrapping path or overwrapping enabled */
- cpu_physical_memory_write( s->RxBuf + s->RxBufAddr, buf, size );
+ dma_memory_write( s->dma, s->RxBuf + s->RxBufAddr, buf, size );
s->RxBufAddr += size;
}
@@ -962,13 +964,13 @@
uint32_t val, rxdw0,rxdw1,rxbufLO,rxbufHI;
- cpu_physical_memory_read(cplus_rx_ring_desc, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_rx_ring_desc, (uint8_t *)&val, 4);
rxdw0 = le32_to_cpu(val);
- cpu_physical_memory_read(cplus_rx_ring_desc+4, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_rx_ring_desc+4, (uint8_t *)&val, 4);
rxdw1 = le32_to_cpu(val);
- cpu_physical_memory_read(cplus_rx_ring_desc+8, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_rx_ring_desc+8, (uint8_t *)&val, 4);
rxbufLO = le32_to_cpu(val);
- cpu_physical_memory_read(cplus_rx_ring_desc+12, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_rx_ring_desc+12, (uint8_t *)&val, 4);
rxbufHI = le32_to_cpu(val);
DEBUG_PRINT(("RTL8139: +++ C+ mode RX descriptor %d %08x %08x %08x %08x\n",
@@ -1013,7 +1015,7 @@
target_phys_addr_t rx_addr = rtl8139_addr64(rxbufLO, rxbufHI);
/* receive/copy to target memory */
- cpu_physical_memory_write( rx_addr, buf, size );
+ dma_memory_write( s->dma, rx_addr, buf, size );
if (s->CpCmd & CPlusRxChkSum)
{
@@ -1026,7 +1028,7 @@
#else
val = 0;
#endif
- cpu_physical_memory_write( rx_addr+size, (uint8_t *)&val, 4);
+ dma_memory_write( s->dma, rx_addr+size, (uint8_t *)&val, 4);
/* first segment of received packet flag */
#define CP_RX_STATUS_FS (1<<29)
@@ -1075,9 +1077,9 @@
/* update ring data */
val = cpu_to_le32(rxdw0);
- cpu_physical_memory_write(cplus_rx_ring_desc, (uint8_t *)&val, 4);
+ dma_memory_write( s->dma, cplus_rx_ring_desc, (uint8_t *)&val, 4);
val = cpu_to_le32(rxdw1);
- cpu_physical_memory_write(cplus_rx_ring_desc+4, (uint8_t *)&val, 4);
+ dma_memory_write( s->dma, cplus_rx_ring_desc+4, (uint8_t *)&val, 4);
/* update tally counter */
++s->tally_counters.RxOk;
@@ -1268,50 +1270,50 @@
counters->TxUndrn = 0;
}
-static void RTL8139TallyCounters_physical_memory_write(target_phys_addr_t tc_addr, RTL8139TallyCounters* tally_counters)
+static void RTL8139TallyCounters_physical_memory_write(RTL8139State *s, target_phys_addr_t tc_addr, RTL8139TallyCounters* tally_counters)
{
uint16_t val16;
uint32_t val32;
uint64_t val64;
val64 = cpu_to_le64(tally_counters->TxOk);
- cpu_physical_memory_write(tc_addr + 0, (uint8_t *)&val64, 8);
+ dma_memory_write( s->dma, tc_addr + 0, (uint8_t *)&val64, 8);
val64 = cpu_to_le64(tally_counters->RxOk);
- cpu_physical_memory_write(tc_addr + 8, (uint8_t *)&val64, 8);
+ dma_memory_write( s->dma, tc_addr + 8, (uint8_t *)&val64, 8);
val64 = cpu_to_le64(tally_counters->TxERR);
- cpu_physical_memory_write(tc_addr + 16, (uint8_t *)&val64, 8);
+ dma_memory_write( s->dma, tc_addr + 16, (uint8_t *)&val64, 8);
val32 = cpu_to_le32(tally_counters->RxERR);
- cpu_physical_memory_write(tc_addr + 24, (uint8_t *)&val32, 4);
+ dma_memory_write( s->dma, tc_addr + 24, (uint8_t *)&val32, 4);
val16 = cpu_to_le16(tally_counters->MissPkt);
- cpu_physical_memory_write(tc_addr + 28, (uint8_t *)&val16, 2);
+ dma_memory_write( s->dma, tc_addr + 28, (uint8_t *)&val16, 2);
val16 = cpu_to_le16(tally_counters->FAE);
- cpu_physical_memory_write(tc_addr + 30, (uint8_t *)&val16, 2);
+ dma_memory_write( s->dma, tc_addr + 30, (uint8_t *)&val16, 2);
val32 = cpu_to_le32(tally_counters->Tx1Col);
- cpu_physical_memory_write(tc_addr + 32, (uint8_t *)&val32, 4);
+ dma_memory_write( s->dma, tc_addr + 32, (uint8_t *)&val32, 4);
val32 = cpu_to_le32(tally_counters->TxMCol);
- cpu_physical_memory_write(tc_addr + 36, (uint8_t *)&val32, 4);
+ dma_memory_write( s->dma, tc_addr + 36, (uint8_t *)&val32, 4);
val64 = cpu_to_le64(tally_counters->RxOkPhy);
- cpu_physical_memory_write(tc_addr + 40, (uint8_t *)&val64, 8);
+ dma_memory_write( s->dma, tc_addr + 40, (uint8_t *)&val64, 8);
val64 = cpu_to_le64(tally_counters->RxOkBrd);
- cpu_physical_memory_write(tc_addr + 48, (uint8_t *)&val64, 8);
+ dma_memory_write( s->dma, tc_addr + 48, (uint8_t *)&val64, 8);
val32 = cpu_to_le32(tally_counters->RxOkMul);
- cpu_physical_memory_write(tc_addr + 56, (uint8_t *)&val32, 4);
+ dma_memory_write( s->dma, tc_addr + 56, (uint8_t *)&val32, 4);
val16 = cpu_to_le16(tally_counters->TxAbt);
- cpu_physical_memory_write(tc_addr + 60, (uint8_t *)&val16, 2);
+ dma_memory_write( s->dma, tc_addr + 60, (uint8_t *)&val16, 2);
val16 = cpu_to_le16(tally_counters->TxUndrn);
- cpu_physical_memory_write(tc_addr + 62, (uint8_t *)&val16, 2);
+ dma_memory_write( s->dma, tc_addr + 62, (uint8_t *)&val16, 2);
}
/* Loads values of tally counters from VM state file */
@@ -1779,7 +1781,7 @@
DEBUG_PRINT(("RTL8139: +++ transmit reading %d bytes from host memory at 0x%08x\n",
txsize, s->TxAddr[descriptor]));
- cpu_physical_memory_read(s->TxAddr[descriptor], txbuffer, txsize);
+ dma_memory_read( s->dma, s->TxAddr[descriptor], txbuffer, txsize);
/* Mark descriptor as transferred */
s->TxStatus[descriptor] |= TxHostOwns;
@@ -1910,13 +1912,13 @@
uint32_t val, txdw0,txdw1,txbufLO,txbufHI;
- cpu_physical_memory_read(cplus_tx_ring_desc, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_tx_ring_desc, (uint8_t *)&val, 4);
txdw0 = le32_to_cpu(val);
- cpu_physical_memory_read(cplus_tx_ring_desc+4, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_tx_ring_desc+4, (uint8_t *)&val, 4);
txdw1 = le32_to_cpu(val);
- cpu_physical_memory_read(cplus_tx_ring_desc+8, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_tx_ring_desc+8, (uint8_t *)&val, 4);
txbufLO = le32_to_cpu(val);
- cpu_physical_memory_read(cplus_tx_ring_desc+12, (uint8_t *)&val, 4);
+ dma_memory_read( s->dma, cplus_tx_ring_desc+12, (uint8_t *)&val, 4);
txbufHI = le32_to_cpu(val);
DEBUG_PRINT(("RTL8139: +++ C+ mode TX descriptor %d %08x %08x %08x %08x\n",
@@ -2020,7 +2022,7 @@
DEBUG_PRINT(("RTL8139: +++ C+ mode transmit reading %d bytes from host memory at %016" PRIx64 " to offset %d\n",
txsize, (uint64_t)tx_addr, s->cplus_txbuffer_offset));
- cpu_physical_memory_read(tx_addr, s->cplus_txbuffer + s->cplus_txbuffer_offset, txsize);
+ dma_memory_read( s->dma, tx_addr, s->cplus_txbuffer + s->cplus_txbuffer_offset, txsize);
s->cplus_txbuffer_offset += txsize;
/* seek to next Rx descriptor */
@@ -2047,9 +2049,9 @@
/* update ring data */
val = cpu_to_le32(txdw0);
- cpu_physical_memory_write(cplus_tx_ring_desc, (uint8_t *)&val, 4);
+ dma_memory_write( s->dma, cplus_tx_ring_desc, (uint8_t *)&val, 4);
// val = cpu_to_le32(txdw1);
-// cpu_physical_memory_write(cplus_tx_ring_desc+4, &val, 4);
+// dma_memory_write( s->dma, cplus_tx_ring_desc+4, &val, 4);
/* Now decide if descriptor being processed is holding the last segment of packet */
if (txdw0 & CP_TX_LS)
@@ -2375,7 +2377,7 @@
target_phys_addr_t tc_addr = rtl8139_addr64(s->TxStatus[0] & ~0x3f, s->TxStatus[1]);
/* dump tally counters to specified memory location */
- RTL8139TallyCounters_physical_memory_write( tc_addr, &s->tally_counters);
+ RTL8139TallyCounters_physical_memory_write( s, tc_addr, &s->tally_counters);
/* mark dump completed */
s->TxStatus[0] &= ~0x8;
@@ -3405,7 +3407,7 @@
}
#endif /* RTL8139_ONBOARD_TIMER */
-void pci_rtl8139_init(PCIBus *bus, NICInfo *nd, int devfn)
+void pci_rtl8139_init(PCIBus *bus, NICInfo *nd, int devfn, qemu_dma *parent_dma)
{
PCIRTL8139State *d;
RTL8139State *s;
@@ -3442,6 +3444,7 @@
s->pci_dev = (PCIDevice *)d;
memcpy(s->macaddr, nd->macaddr, 6);
+ s->dma = parent_dma;
rtl8139_reset(s);
s->vc = qemu_new_vlan_client(nd->vlan, rtl8139_receive,
rtl8139_can_receive, s);
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-23 17:39:19.000000000 +0000
+++ qemu/vl.h 2007-08-23 20:31:01.000000000 +0000
@@ -877,7 +877,7 @@
PCIBus *pci_register_bus(pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
qemu_irq *pic, int devfn_min, int nirq);
-void pci_nic_init(PCIBus *bus, NICInfo *nd, int devfn);
+void pci_nic_init(PCIBus *bus, NICInfo *nd, int devfn, qemu_dma *parent_dma);
void pci_data_write(void *opaque, uint32_t addr, uint32_t val, int len);
uint32_t pci_data_read(void *opaque, uint32_t addr, int len);
int pci_bus_num(PCIBus *s);
@@ -897,8 +897,9 @@
PCIBus *pci_pmac_init(qemu_irq *pic);
/* apb_pci.c */
-PCIBus *pci_apb_init(target_phys_addr_t special_base, target_phys_addr_t mem_base,
- qemu_irq *pic);
+PCIBus *pci_apb_init(target_phys_addr_t special_base,
+ target_phys_addr_t mem_base,
+ qemu_irq *pic, qemu_dma *parent_dma, qemu_dma **dvma);
PCIBus *pci_vpb_init(qemu_irq *pic, int irq, int realview);
@@ -938,7 +939,7 @@
union {
int (*init_isa) (AudioState *s, qemu_irq *pic, qemu_dma *parent_dma,
qemu_dma *parent_hdma);
- int (*init_pci) (PCIBus *bus, AudioState *s);
+ int (*init_pci) (PCIBus *bus, AudioState *s, qemu_dma *parent_dma);
} init;
};
@@ -1044,7 +1045,7 @@
ds1225y_t *ds1225y_init(target_phys_addr_t mem_base, const char *filename);
/* es1370.c */
-int es1370_init (PCIBus *bus, AudioState *s);
+int es1370_init (PCIBus *bus, AudioState *audio, qemu_dma *parent_dma);
/* sb16.c */
int SB16_init (AudioState *audio, qemu_irq *pic, qemu_dma *parent_dma,
@@ -1081,9 +1082,12 @@
/* eepro100.c */
-void pci_i82551_init(PCIBus *bus, NICInfo *nd, int devfn);
-void pci_i82557b_init(PCIBus *bus, NICInfo *nd, int devfn);
-void pci_i82559er_init(PCIBus *bus, NICInfo *nd, int devfn);
+void pci_i82551_init(PCIBus * bus, NICInfo * nd, int devfn,
+ qemu_dma *parent_dma);
+void pci_i82557b_init(PCIBus * bus, NICInfo * nd, int devfn,
+ qemu_dma *parent_dma);
+void pci_i82559er_init(PCIBus * bus, NICInfo * nd, int devfn,
+ qemu_dma *parent_dma);
/* ne2000.c */
@@ -1092,11 +1096,11 @@
/* rtl8139.c */
-void pci_rtl8139_init(PCIBus *bus, NICInfo *nd, int devfn);
+void pci_rtl8139_init(PCIBus *bus, NICInfo *nd, int devfn, qemu_dma *parent_dma);
/* pcnet.c */
-void pci_pcnet_init(PCIBus *bus, NICInfo *nd, int devfn);
+void pci_pcnet_init(PCIBus *bus, NICInfo *nd, int devfn, qemu_dma *parent_dma);
void lance_init(NICInfo *nd, target_phys_addr_t leaddr, qemu_irq irq,
qemu_dma *parent_dma, qemu_irq *reset);
Index: qemu/hw/eepro100.c
===================================================================
--- qemu.orig/hw/eepro100.c 2007-08-23 17:44:21.000000000 +0000
+++ qemu/hw/eepro100.c 2007-08-23 18:23:37.000000000 +0000
@@ -251,6 +251,7 @@
/* Data in mem is always in the byte order of the controller (le). */
uint8_t mem[PCI_MEM_SIZE];
+ qemu_dma *dma;
} EEPRO100State;
/* Default values for MDI (PHY) registers */
@@ -642,7 +643,7 @@
* values which really matter.
* Number of data should check configuration!!!
*/
- cpu_physical_memory_write(s->statsaddr, (uint8_t *) & s->statistics, 64);
+ dma_memory_write(s->dma, s->statsaddr, (uint8_t *) & s->statistics, 64);
stl_phys(s->statsaddr + 0, s->statistics.tx_good_frames);
stl_phys(s->statsaddr + 36, s->statistics.rx_good_frames);
stl_phys(s->statsaddr + 48, s->statistics.rx_resource_errors);
@@ -672,7 +673,7 @@
s->cu_offset = s->pointer;
next_command:
cb_address = s->cu_base + s->cu_offset;
- cpu_physical_memory_read(cb_address, (uint8_t *) & tx, sizeof(tx));
+ dma_memory_read(s->dma, cb_address, (uint8_t *) & tx, sizeof(tx));
uint16_t status = le16_to_cpu(tx.status);
uint16_t command = le16_to_cpu(tx.command);
logout
@@ -690,12 +691,12 @@
/* Do nothing. */
break;
case CmdIASetup:
- cpu_physical_memory_read(cb_address + 8, &s->macaddr[0], 6);
+ dma_memory_read(s->dma, cb_address + 8, &s->macaddr[0], 6);
logout("macaddr: %s\n", nic_dump(&s->macaddr[0], 6));
break;
case CmdConfigure:
- cpu_physical_memory_read(cb_address + 8, &s->configuration[0],
- sizeof(s->configuration));
+ dma_memory_read(s->dma, cb_address + 8, &s->configuration[0],
+ sizeof(s->configuration));
logout("configuration: %s\n", nic_dump(&s->configuration[0], 16));
break;
case CmdMulticastList:
@@ -729,8 +730,8 @@
logout
("TBD (simplified mode): buffer address 0x%08x, size 0x%04x\n",
tx_buffer_address, tx_buffer_size);
- cpu_physical_memory_read(tx_buffer_address, &buf[size],
- tx_buffer_size);
+ dma_memory_read(s->dma, tx_buffer_address, &buf[size],
+ tx_buffer_size);
size += tx_buffer_size;
}
if (tbd_array == 0xffffffff) {
@@ -749,8 +750,8 @@
logout
("TBD (extended mode): buffer address 0x%08x, size 0x%04x\n",
tx_buffer_address, tx_buffer_size);
- cpu_physical_memory_read(tx_buffer_address, &buf[size],
- tx_buffer_size);
+ dma_memory_read(s->dma, tx_buffer_address, &buf[size],
+ tx_buffer_size);
size += tx_buffer_size;
if (tx_buffer_el & 1) {
break;
@@ -766,8 +767,8 @@
logout
("TBD (flexible mode): buffer address 0x%08x, size 0x%04x\n",
tx_buffer_address, tx_buffer_size);
- cpu_physical_memory_read(tx_buffer_address, &buf[size],
- tx_buffer_size);
+ dma_memory_read(s->dma, tx_buffer_address, &buf[size],
+ tx_buffer_size);
size += tx_buffer_size;
if (tx_buffer_el & 1) {
break;
@@ -1112,10 +1113,10 @@
case PORT_SELFTEST:
logout("selftest address=0x%08x\n", address);
eepro100_selftest_t data;
- cpu_physical_memory_read(address, (uint8_t *) & data, sizeof(data));
+ dma_memory_read(s->dma, address, (uint8_t *) & data, sizeof(data));
data.st_sign = 0xffffffff;
data.st_result = 0;
- cpu_physical_memory_write(address, (uint8_t *) & data, sizeof(data));
+ dma_memory_write(s->dma, address, (uint8_t *) & data, sizeof(data));
break;
case PORT_SELECTIVE_RESET:
logout("selective reset, selftest address=0x%08x\n", address);
@@ -1534,8 +1535,8 @@
//~ !!!
//~ $3 = {status = 0x0, command = 0xc000, link = 0x2d220, rx_buf_addr = 0x207dc, count = 0x0, size = 0x5f8, packet = {0x0 <repeats 1518 times>}}
eepro100_rx_t rx;
- cpu_physical_memory_read(s->ru_base + s->ru_offset, (uint8_t *) & rx,
- offsetof(eepro100_rx_t, packet));
+ dma_memory_read(s->dma, s->ru_base + s->ru_offset, (uint8_t *) & rx,
+ offsetof(eepro100_rx_t, packet));
uint16_t rfd_command = le16_to_cpu(rx.command);
uint16_t rfd_size = le16_to_cpu(rx.size);
assert(size <= rfd_size);
@@ -1553,8 +1554,8 @@
assert(!(s->configuration[18] & 4));
/* TODO: check stripping enable bit. */
//~ assert(!(s->configuration[17] & 1));
- cpu_physical_memory_write(s->ru_base + s->ru_offset +
- offsetof(eepro100_rx_t, packet), buf, size);
+ dma_memory_write(s->dma, s->ru_base + s->ru_offset +
+ offsetof(eepro100_rx_t, packet), buf, size);
s->statistics.rx_good_frames++;
eepro100_fr_interrupt(s);
s->ru_offset = le32_to_cpu(rx.link);
@@ -1736,7 +1737,7 @@
}
static void nic_init(PCIBus * bus, NICInfo * nd,
- const char *name, uint32_t device)
+ const char *name, uint32_t device, qemu_dma *parent_dma)
{
PCIEEPRO100State *d;
EEPRO100State *s;
@@ -1750,6 +1751,7 @@
s = &d->eepro100;
s->device = device;
s->pci_dev = &d->dev;
+ s->dma = parent_dma;
pci_reset(s);
@@ -1789,20 +1791,23 @@
register_savevm(name, 0, 3, nic_save, nic_load, s);
}
-void pci_i82551_init(PCIBus * bus, NICInfo * nd, int devfn)
+void pci_i82551_init(PCIBus * bus, NICInfo * nd, int devfn,
+ qemu_dma *parent_dma)
{
- nic_init(bus, nd, "i82551", i82551);
+ nic_init(bus, nd, "i82551", i82551, parent_dma);
//~ uint8_t *pci_conf = d->dev.config;
}
-void pci_i82557b_init(PCIBus * bus, NICInfo * nd, int devfn)
+void pci_i82557b_init(PCIBus * bus, NICInfo * nd, int devfn,
+ qemu_dma *parent_dma)
{
- nic_init(bus, nd, "i82557b", i82557B);
+ nic_init(bus, nd, "i82557b", i82557B, parent_dma);
}
-void pci_i82559er_init(PCIBus * bus, NICInfo * nd, int devfn)
+void pci_i82559er_init(PCIBus * bus, NICInfo * nd, int devfn,
+ qemu_dma *parent_dma)
{
- nic_init(bus, nd, "i82559er", i82559ER);
+ nic_init(bus, nd, "i82559er", i82559ER, parent_dma);
}
/* eof */
Index: qemu/hw/pcnet.c
===================================================================
--- qemu.orig/hw/pcnet.c 2007-08-23 18:25:26.000000000 +0000
+++ qemu/hw/pcnet.c 2007-08-23 18:27:10.000000000 +0000
@@ -1947,16 +1947,16 @@
static void pci_physical_memory_write(void *dma_opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int do_bswap)
{
- cpu_physical_memory_write(addr, buf, len);
+ dma_memory_write(dma_opaque, addr, buf, len);
}
static void pci_physical_memory_read(void *dma_opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int do_bswap)
{
- cpu_physical_memory_read(addr, buf, len);
+ dma_memory_read(dma_opaque, addr, buf, len);
}
-void pci_pcnet_init(PCIBus *bus, NICInfo *nd, int devfn)
+void pci_pcnet_init(PCIBus *bus, NICInfo *nd, int devfn, qemu_dma *parent_dma)
{
PCNetState *d;
uint8_t *pci_conf;
@@ -2002,6 +2002,7 @@
d->phys_mem_read = pci_physical_memory_read;
d->phys_mem_write = pci_physical_memory_write;
d->pci_dev = &d->dev;
+ d->dma_opaque = parent_dma;
pcnet_common_init(d, nd, "pcnet");
}
Index: qemu/hw/mips_malta.c
===================================================================
--- qemu.orig/hw/mips_malta.c 2007-08-23 18:35:39.000000000 +0000
+++ qemu/hw/mips_malta.c 2007-08-23 20:37:52.000000000 +0000
@@ -435,7 +435,7 @@
/* Audio support */
#ifdef HAS_AUDIO
-static void audio_init (PCIBus *pci_bus)
+static void audio_init (PCIBus *pci_bus, qemu_dma *parent_dma)
{
struct soundhw *c;
int audio_enabled = 0;
@@ -451,7 +451,7 @@
if (s) {
for (c = soundhw; c->name; ++c) {
if (c->enabled)
- c->init.init_pci (pci_bus, s);
+ c->init.init_pci (pci_bus, s, parent_dma);
}
}
}
@@ -459,7 +459,7 @@
#endif
/* Network support */
-static void network_init (PCIBus *pci_bus)
+static void network_init (PCIBus *pci_bus, qemu_dma *parent_dma)
{
int i;
NICInfo *nd;
@@ -471,13 +471,19 @@
}
if (i == 0 && strcmp(nd->model, "pcnet") == 0) {
/* The malta board has a PCNet card using PCI SLOT 11 */
- pci_nic_init(pci_bus, nd, 88);
+ pci_nic_init(pci_bus, nd, 88, parent_dma);
} else {
- pci_nic_init(pci_bus, nd, -1);
+ pci_nic_init(pci_bus, nd, -1, parent_dma);
}
}
}
+static void mips_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
/* ROM and pseudo bootloader
The following code implements a very very simple bootloader. It first
@@ -759,6 +765,7 @@
int piix4_devfn;
uint8_t *eeprom_buf;
i2c_bus *smbus;
+ qemu_dma *physical_dma, **isa_dma;
int i;
/* init CPUs */
@@ -853,7 +860,8 @@
smbus_eeprom_device_init(smbus, 0x50 + i, eeprom_buf + (i * 256));
}
pit = pit_init(0x40, i8259[0]);
- DMA_init(0);
+ physical_dma = qemu_init_dma(mips_dma_memory_rw, NULL);
+ DMA_init(0, physical_dma, &isa_dma);
/* Super I/O */
i8042_init(i8259[1], i8259[12], 0x60);
@@ -870,11 +878,11 @@
/* Sound card */
#ifdef HAS_AUDIO
- audio_init(pci_bus);
+ audio_init(pci_bus, physical_dma);
#endif
/* Network card */
- network_init(pci_bus);
+ network_init(pci_bus, physical_dma);
/* Optional PCI video card */
pci_cirrus_vga_init(pci_bus, ds, phys_ram_base + ram_size,
Index: qemu/hw/pc.c
===================================================================
--- qemu.orig/hw/pc.c 2007-08-23 18:34:35.000000000 +0000
+++ qemu/hw/pc.c 2007-08-23 20:34:02.000000000 +0000
@@ -645,7 +645,7 @@
}
else {
if (pci_bus) {
- c->init.init_pci (pci_bus, s);
+ c->init.init_pci (pci_bus, s, parent_dma);
}
}
}
@@ -877,7 +877,7 @@
} else if (pci_enabled) {
if (strcmp(nd->model, "?") == 0)
fprintf(stderr, "qemu: Supported ISA NICs: ne2k_isa\n");
- pci_nic_init(pci_bus, nd, -1);
+ pci_nic_init(pci_bus, nd, -1, physical_dma);
} else if (strcmp(nd->model, "?") == 0) {
fprintf(stderr, "qemu: Supported ISA NICs: ne2k_isa\n");
exit(1);
@@ -899,7 +899,8 @@
i8042_init(i8259[1], i8259[12], 0x60);
DMA_init(0, physical_dma, &isa_dma);
#ifdef HAS_AUDIO
- audio_init(pci_enabled ? pci_bus : NULL, i8259, isa_dma[1], isa_dma[5]);
+ audio_init(pci_enabled ? pci_bus : NULL, i8259,
+ pci_enabled ? physical_dma : isa_dma[1], isa_dma[5]);
#endif
floppy_controller = fdctrl_init(i8259[6], 2, 0, 0x3f0, fd_table, isa_dma[2]);
Index: qemu/hw/mips_pica61.c
===================================================================
--- qemu.orig/hw/mips_pica61.c 2007-08-23 18:46:50.000000000 +0000
+++ qemu/hw/mips_pica61.c 2007-08-23 18:48:17.000000000 +0000
@@ -47,6 +47,12 @@
extern FILE *logfile;
+static void mips_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
static void main_cpu_reset(void *opaque)
{
CPUState *env = opaque;
@@ -68,6 +74,7 @@
mips_def_t *def;
int available_ram;
qemu_irq *i8259;
+ qemu_dma *physical_dma, **isa_dma;
/* init CPUs */
if (cpu_model == NULL) {
@@ -129,6 +136,8 @@
i8259 = i8259_init(env->irq[2]);
rtc_mm_init(0x80004070, 1, i8259[14]);
pit_init(0x40, 0);
+ physical_dma = qemu_init_dma(mips_dma_memory_rw, NULL);
+ DMA_init(0, physical_dma, &isa_dma);
/* Keyboard (i8042) */
i8042_mm_init(i8259[6], i8259[7], 0x80005060, 0);
@@ -145,7 +154,7 @@
/* FIXME: missing NCR 53C94 */
/* ISA devices (floppy, serial, parallel) */
- fdctrl_init(i8259[1], 1, 1, 0x80003000, fd_table);
+ fdctrl_init(i8259[1], 1, 1, 0x80003000, fd_table, isa_dma[1]);
for(i = 0; i < MAX_SERIAL_PORTS; i++) {
if (serial_hds[i]) {
serial_mm_init(serial_base[i], 0, i8259[serial_irq[i]], serial_hds[i], 1);
Index: qemu/hw/realview.c
===================================================================
--- qemu.orig/hw/realview.c 2007-08-23 18:40:48.000000000 +0000
+++ qemu/hw/realview.c 2007-08-23 19:39:16.000000000 +0000
@@ -10,6 +10,12 @@
#include "vl.h"
#include "arm_pic.h"
+static void arm_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
/* Board init. */
static void realview_init(int ram_size, int vga_ram_size, int boot_device,
@@ -24,6 +30,7 @@
NICInfo *nd;
int n;
int done_smc = 0;
+ qemu_dma *physical_dma;
env = cpu_init();
if (!cpu_model)
@@ -59,6 +66,8 @@
pl031_init(0x10017000, pic[10]);
+ physical_dma = qemu_init_dma(arm_dma_memory_rw, NULL);
+
pci_bus = pci_vpb_init(pic, 48, 1);
if (usb_enabled) {
usb_ohci_init_pci(pci_bus, 3, -1);
@@ -76,7 +85,7 @@
if (strcmp(nd->model, "smc91c111") == 0) {
smc91c111_init(nd, 0x4e000000, pic[28]);
} else {
- pci_nic_init(pci_bus, nd, -1);
+ pci_nic_init(pci_bus, nd, -1, physical_dma);
}
}
Index: qemu/hw/ppc_chrp.c
===================================================================
--- qemu.orig/hw/ppc_chrp.c 2007-08-23 18:50:55.000000000 +0000
+++ qemu/hw/ppc_chrp.c 2007-08-23 18:59:59.000000000 +0000
@@ -289,6 +289,12 @@
buf[1] = nvram_chksum(buf, 16);
}
+static void ppc_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
/* PowerPC CHRP hardware initialisation */
static void ppc_chrp_init (int ram_size, int vga_ram_size, int boot_device,
DisplayState *ds, const char **fd_filename,
@@ -312,6 +318,7 @@
const char *arch_name;
int vga_bios_size, bios_size;
qemu_irq *dummy_irq;
+ qemu_dma *physical_dma;
linux_boot = (kernel_filename != NULL);
@@ -408,6 +415,8 @@
initrd_size = 0;
}
+ physical_dma = qemu_init_dma(ppc_dma_memory_rw, NULL);
+
if (is_heathrow) {
isa_mem_base = 0x80000000;
@@ -434,7 +443,7 @@
for(i = 0; i < nb_nics; i++) {
if (!nd_table[i].model)
nd_table[i].model = "ne2k_pci";
- pci_nic_init(pci_bus, &nd_table[i], -1);
+ pci_nic_init(pci_bus, &nd_table[i], -1, physical_dma);
}
pci_cmd646_ide_init(pci_bus, &bs_table[0], 0);
@@ -524,7 +533,7 @@
for(i = 0; i < nb_nics; i++) {
if (!nd_table[i].model)
nd_table[i].model = "ne2k_pci";
- pci_nic_init(pci_bus, &nd_table[i], -1);
+ pci_nic_init(pci_bus, &nd_table[i], -1, physical_dma);
}
#if 1
ide0_mem_index = pmac_ide_init(&bs_table[0], pic[0x13]);
Index: qemu/hw/apb_pci.c
===================================================================
--- qemu.orig/hw/apb_pci.c 2007-08-23 19:15:08.000000000 +0000
+++ qemu/hw/apb_pci.c 2007-08-23 19:28:37.000000000 +0000
@@ -30,7 +30,10 @@
typedef target_phys_addr_t pci_addr_t;
#include "pci_host.h"
-typedef PCIHostState APBState;
+typedef struct APBState {
+ PCIHostState pci;
+ qemu_dma *dma;
+} APBState;
static void pci_apb_config_writel (void *opaque, target_phys_addr_t addr,
uint32_t val)
@@ -42,7 +45,7 @@
if ((val & (1 << i)) != 0)
break;
}
- s->config_reg = (1 << 16) | (val & 0x7FC) | (i << 11);
+ s->pci.config_reg = (1 << 16) | (val & 0x7FC) | (i << 11);
}
static uint32_t pci_apb_config_readl (void *opaque,
@@ -52,8 +55,9 @@
uint32_t val;
int devfn;
- devfn = (s->config_reg >> 8) & 0xFF;
- val = (1 << (devfn >> 3)) | ((devfn & 0x07) << 8) | (s->config_reg & 0xFC);
+ devfn = (s->pci.config_reg >> 8) & 0xFF;
+ val = (1 << (devfn >> 3)) | ((devfn & 0x07) << 8) |
+ (s->pci.config_reg & 0xFC);
return val;
}
@@ -206,9 +210,18 @@
qemu_set_irq(pic[irq_num], level);
}
+static void apb_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ APBState *s = opaque;
+
+ // XXX implement IOMMU
+ dma_memory_rw(s->dma, addr, buf, len, is_write);
+}
+
PCIBus *pci_apb_init(target_phys_addr_t special_base,
target_phys_addr_t mem_base,
- qemu_irq *pic)
+ qemu_irq *pic, qemu_dma *parent_dma, qemu_dma **dvma)
{
APBState *s;
PCIDevice *d;
@@ -217,7 +230,7 @@
s = qemu_mallocz(sizeof(APBState));
/* Ultrasparc PBM main bus */
- s->bus = pci_register_bus(pci_apb_set_irq, pci_pbm_map_irq, pic, 0, 32);
+ s->pci.bus = pci_register_bus(pci_apb_set_irq, pci_pbm_map_irq, pic, 0, 32);
pci_mem_config = cpu_register_io_memory(0, pci_apb_config_read,
pci_apb_config_write, s);
@@ -233,7 +246,7 @@
cpu_register_physical_memory(special_base + 0x2000000ULL, 0x10000, pci_ioport);
cpu_register_physical_memory(mem_base, 0x10000000, pci_mem_data); // XXX size should be 4G-prom
- d = pci_register_device(s->bus, "Advanced PCI Bus", sizeof(PCIDevice),
+ d = pci_register_device(s->pci.bus, "Advanced PCI Bus", sizeof(PCIDevice),
0, NULL, NULL);
d->config[0x00] = 0x8e; // vendor_id : Sun
d->config[0x01] = 0x10;
@@ -251,8 +264,12 @@
d->config[0x0E] = 0x00; // header_type
/* APB secondary busses */
- secondary = pci_bridge_init(s->bus, 8, 0x108e5000, pci_apb_map_irq, "Advanced PCI Bus secondary bridge 1");
- pci_bridge_init(s->bus, 9, 0x108e5000, pci_apb_map_irq, "Advanced PCI Bus secondary bridge 2");
+ secondary = pci_bridge_init(s->pci.bus, 8, 0x108e5000, pci_apb_map_irq, "Advanced PCI Bus secondary bridge 1");
+ pci_bridge_init(s->pci.bus, 9, 0x108e5000, pci_apb_map_irq, "Advanced PCI Bus secondary bridge 2");
+
+ s->dma = parent_dma;
+ *dvma = qemu_init_dma(apb_dma_memory_rw, s);
+
return secondary;
}
Index: qemu/hw/sun4u.c
===================================================================
--- qemu.orig/hw/sun4u.c 2007-08-23 19:11:40.000000000 +0000
+++ qemu/hw/sun4u.c 2007-08-23 19:21:25.000000000 +0000
@@ -47,19 +47,10 @@
{
return 0;
}
-int DMA_read_memory (int nchan, void *buf, int pos, int size)
-{
- return 0;
-}
-int DMA_write_memory (int nchan, void *buf, int pos, int size)
-{
- return 0;
-}
void DMA_hold_DREQ (int nchan) {}
void DMA_release_DREQ (int nchan) {}
void DMA_schedule(int nchan) {}
void DMA_run (void) {}
-void DMA_init (int high_page_enable) {}
void DMA_register_channel (int nchan,
DMA_transfer_handler transfer_handler,
void *opaque)
@@ -318,6 +309,12 @@
{
}
+static void sun4u_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
static const int ide_iobase[2] = { 0x1f0, 0x170 };
static const int ide_iobase2[2] = { 0x3f6, 0x376 };
static const int ide_irq[2] = { 14, 15 };
@@ -346,6 +343,7 @@
const sparc_def_t *def;
QEMUBH *bh;
qemu_irq *irq;
+ qemu_dma *physical_dma, *pci_dvma;
linux_boot = (kernel_filename != NULL);
@@ -425,7 +423,10 @@
}
}
}
- pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, NULL);
+ physical_dma = qemu_init_dma(sun4u_dma_memory_rw, NULL);
+ pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, NULL, physical_dma,
+ &pci_dvma);
+
isa_mem_base = VGA_BASE;
pci_cirrus_vga_init(pci_bus, ds, phys_ram_base + ram_size, ram_size, vga_ram_size);
@@ -444,7 +445,7 @@
for(i = 0; i < nb_nics; i++) {
if (!nd_table[i].model)
nd_table[i].model = "ne2k_pci";
- pci_nic_init(pci_bus, &nd_table[i], -1);
+ pci_nic_init(pci_bus, &nd_table[i], -1, pci_dvma);
}
irq = qemu_allocate_irqs(dummy_cpu_set_irq, NULL, 32);
@@ -452,7 +453,7 @@
pci_piix3_ide_init(pci_bus, bs_table, -1, irq);
/* FIXME: wire up interrupts. */
i8042_init(NULL/*1*/, NULL/*12*/, 0x60);
- floppy_controller = fdctrl_init(NULL/*6*/, 2, 0, 0x3f0, fd_table);
+ floppy_controller = fdctrl_init(NULL/*6*/, 2, 0, 0x3f0, fd_table, pci_dvma);
nvram = m48t59_init(NULL/*8*/, 0, 0x0074, NVRAM_SIZE, 59);
sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", ram_size, boot_device,
KERNEL_LOAD_ADDR, kernel_size,
Index: qemu/hw/versatilepb.c
===================================================================
--- qemu.orig/hw/versatilepb.c 2007-08-23 19:38:10.000000000 +0000
+++ qemu/hw/versatilepb.c 2007-08-23 19:39:33.000000000 +0000
@@ -145,6 +145,12 @@
return qi;
}
+static void arm_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
/* Board init. */
/* The AB and PB boards both use the same core, just with different
@@ -163,6 +169,7 @@
void *scsi_hba;
PCIBus *pci_bus;
NICInfo *nd;
+ qemu_dma *physical_dma;
int n;
int done_smc = 0;
@@ -181,6 +188,8 @@
pl050_init(0x10006000, sic[3], 0);
pl050_init(0x10007000, sic[4], 1);
+ physical_dma = qemu_init_dma(arm_dma_memory_rw, NULL);
+
pci_bus = pci_vpb_init(sic, 27, 0);
/* The Versatile PCI bridge does not provide access to PCI IO space,
so many of the qemu PCI devices are not useable. */
@@ -191,7 +200,7 @@
if (strcmp(nd->model, "smc91c111") == 0) {
smc91c111_init(nd, 0x10010000, sic[25]);
} else {
- pci_nic_init(pci_bus, nd, -1);
+ pci_nic_init(pci_bus, nd, -1, physical_dma);
}
}
if (usb_enabled) {
Index: qemu/hw/es1370.c
===================================================================
--- qemu.orig/hw/es1370.c 2007-08-23 20:24:58.000000000 +0000
+++ qemu/hw/es1370.c 2007-08-23 20:29:54.000000000 +0000
@@ -275,6 +275,7 @@
uint32_t mempage;
uint32_t codec;
uint32_t sctl;
+ qemu_dma *dma;
} ES1370State;
typedef struct PCIES1370State {
@@ -805,7 +806,7 @@
if (!acquired)
break;
- cpu_physical_memory_write (addr, tmpbuf, acquired);
+ dma_memory_write(s->dma, addr, tmpbuf, acquired);
temp -= acquired;
addr += acquired;
@@ -819,7 +820,7 @@
int copied, to_copy;
to_copy = audio_MIN ((size_t) temp, sizeof (tmpbuf));
- cpu_physical_memory_read (addr, tmpbuf, to_copy);
+ dma_memory_read (s->dma, addr, tmpbuf, to_copy);
copied = AUD_write (voice, tmpbuf, to_copy);
if (!copied)
break;
@@ -996,7 +997,7 @@
es1370_reset (s);
}
-int es1370_init (PCIBus *bus, AudioState *audio)
+int es1370_init (PCIBus *bus, AudioState *audio, qemu_dma *parent_dma)
{
PCIES1370State *d;
ES1370State *s;
@@ -1051,6 +1052,7 @@
s = &d->es1370;
s->pci_dev = &d->dev;
+ s->dma = parent_dma;
pci_register_io_region (&d->dev, 0, 256, PCI_ADDRESS_SPACE_IO, es1370_map);
register_savevm ("es1370", 0, 1, es1370_save, es1370_load, s);
[-- Attachment #10: sparc32_dma_esp_le_to_gdma.diff --]
[-- Type: text/x-diff, Size: 13745 bytes --]
Index: qemu/hw/sparc32_dma.c
===================================================================
--- qemu.orig/hw/sparc32_dma.c 2007-08-22 20:03:09.000000000 +0000
+++ qemu/hw/sparc32_dma.c 2007-08-22 20:03:47.000000000 +0000
@@ -58,62 +58,10 @@
struct DMAState {
uint32_t dmaregs[DMA_REGS];
qemu_irq irq;
- void *iommu;
qemu_irq dev_reset;
+ qemu_dma *dma;
};
-/* Note: on sparc, the lance 16 bit bus is swapped */
-void ledma_memory_read(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap)
-{
- DMAState *s = opaque;
- int i;
-
- DPRINTF("DMA write, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- addr |= s->dmaregs[3];
- if (do_bswap) {
- sparc_iommu_memory_read(s->iommu, addr, buf, len);
- } else {
- addr &= ~1;
- len &= ~1;
- sparc_iommu_memory_read(s->iommu, addr, buf, len);
- for(i = 0; i < len; i += 2) {
- bswap16s((uint16_t *)(buf + i));
- }
- }
-}
-
-void ledma_memory_write(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap)
-{
- DMAState *s = opaque;
- int l, i;
- uint16_t tmp_buf[32];
-
- DPRINTF("DMA read, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- addr |= s->dmaregs[3];
- if (do_bswap) {
- sparc_iommu_memory_write(s->iommu, addr, buf, len);
- } else {
- addr &= ~1;
- len &= ~1;
- while (len > 0) {
- l = len;
- if (l > sizeof(tmp_buf))
- l = sizeof(tmp_buf);
- for(i = 0; i < l; i += 2) {
- tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
- }
- sparc_iommu_memory_write(s->iommu, addr, (uint8_t *)tmp_buf, l);
- len -= l;
- buf += l;
- addr += l;
- }
- }
-}
-
static void dma_set_irq(void *opaque, int irq, int level)
{
DMAState *s = opaque;
@@ -128,24 +76,29 @@
}
}
-void espdma_memory_read(void *opaque, uint8_t *buf, int len)
+static void ledma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
{
DMAState *s = opaque;
- DPRINTF("DMA read, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- sparc_iommu_memory_read(s->iommu, s->dmaregs[1], buf, len);
- s->dmaregs[0] |= DMA_INTR;
- s->dmaregs[1] += len;
+ DPRINTF("DMA %s, direction: %c, addr 0x%8.8x\n",
+ is_write ? "write" : "read",
+ s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', addr);
+ addr |= s->dmaregs[3];
+ addr &= ~1;
+ len &= ~1;
+ dma_memory_rw(s->dma, addr, buf, len, is_write);
}
-void espdma_memory_write(void *opaque, uint8_t *buf, int len)
+static void espdma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
{
DMAState *s = opaque;
- DPRINTF("DMA write, direction: %c, addr 0x%8.8x\n",
+ DPRINTF("DMA %s, direction: %c, addr 0x%8.8x\n",
+ is_write ? "write" : "read",
s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- sparc_iommu_memory_write(s->iommu, s->dmaregs[1], buf, len);
+ dma_memory_rw(s->dma, s->dmaregs[1], buf, len, is_write);
s->dmaregs[0] |= DMA_INTR;
s->dmaregs[1] += len;
}
@@ -238,7 +191,8 @@
}
void *sparc32_dma_init(target_phys_addr_t daddr, qemu_irq parent_irq,
- void *iommu, qemu_irq **dev_irq, qemu_irq **reset)
+ qemu_irq **dev_irq, qemu_dma *parent_dma,
+ qemu_dma **dev_dma, int is_espdma, qemu_irq **reset)
{
DMAState *s;
int dma_io_memory;
@@ -248,7 +202,7 @@
return NULL;
s->irq = parent_irq;
- s->iommu = iommu;
+ s->dma = parent_dma;
dma_io_memory = cpu_register_io_memory(0, dma_mem_read, dma_mem_write, s);
cpu_register_physical_memory(daddr, DMA_SIZE, dma_io_memory);
@@ -259,5 +213,10 @@
*reset = &s->dev_reset;
+ if (is_espdma)
+ *dev_dma = qemu_init_dma(espdma_memory_rw, s);
+ else
+ *dev_dma = qemu_init_dma(ledma_memory_rw, s);
+
return s;
}
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c 2007-08-22 20:03:28.000000000 +0000
+++ qemu/hw/sun4m.c 2007-08-22 20:03:47.000000000 +0000
@@ -318,7 +318,7 @@
qemu_irq *cpu_irqs[MAX_CPUS], *slavio_irq, *slavio_cpu_irq,
*espdma_irq, *ledma_irq;
qemu_irq *esp_reset, *le_reset;
- qemu_dma *physical_dma, *dummy_dma, *dvma;
+ qemu_dma *physical_dma, *dummy_dma, *dvma, *esp_dvma, *le_dvma;
/* init CPUs */
sparc_find_by_name(cpu_model, &def);
@@ -360,11 +360,11 @@
hwdef->clock_irq);
espdma = sparc32_dma_init(hwdef->dma_base, slavio_irq[hwdef->esp_irq],
- iommu, &espdma_irq, &esp_reset);
+ &espdma_irq, dvma, &esp_dvma, 1, &esp_reset);
ledma = sparc32_dma_init(hwdef->dma_base + 16ULL,
- slavio_irq[hwdef->le_irq], iommu, &ledma_irq,
- &le_reset);
+ slavio_irq[hwdef->le_irq], &ledma_irq, dvma,
+ &le_dvma, 0, &le_reset);
if (graphic_depth != 8 && graphic_depth != 24) {
fprintf(stderr, "qemu: Unsupported depth: %d\n", graphic_depth);
@@ -375,7 +375,7 @@
if (nd_table[0].model == NULL
|| strcmp(nd_table[0].model, "lance") == 0) {
- lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq, le_reset);
+ lance_init(&nd_table[0], hwdef->le_base, *ledma_irq, le_dvma, le_reset);
} else if (strcmp(nd_table[0].model, "?") == 0) {
fprintf(stderr, "qemu: Supported NICs: lance\n");
exit (1);
@@ -401,7 +401,7 @@
fdctrl_init(slavio_irq[hwdef->fd_irq], 0, 1, hwdef->fd_base, fd_table,
dummy_dma);
- main_esp = esp_init(bs_table, hwdef->esp_base, espdma, *espdma_irq,
+ main_esp = esp_init(bs_table, hwdef->esp_base, *espdma_irq, esp_dvma,
esp_reset);
for (i = 0; i < MAX_DISKS; i++) {
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-22 20:03:09.000000000 +0000
+++ qemu/vl.h 2007-08-22 20:03:47.000000000 +0000
@@ -1097,8 +1097,8 @@
/* pcnet.c */
void pci_pcnet_init(PCIBus *bus, NICInfo *nd, int devfn);
-void lance_init(NICInfo *nd, target_phys_addr_t leaddr, void *dma_opaque,
- qemu_irq irq, qemu_irq *reset);
+void lance_init(NICInfo *nd, target_phys_addr_t leaddr, qemu_irq irq,
+ qemu_dma *parent_dma, qemu_irq *reset);
/* vmmouse.c */
void *vmmouse_init(void *m);
@@ -1265,21 +1265,6 @@
/* iommu.c */
void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
qemu_dma **dvma);
-void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int is_write);
-static inline void sparc_iommu_memory_read(void *opaque,
- target_phys_addr_t addr,
- uint8_t *buf, int len)
-{
- sparc_iommu_memory_rw(opaque, addr, buf, len, 0);
-}
-
-static inline void sparc_iommu_memory_write(void *opaque,
- target_phys_addr_t addr,
- uint8_t *buf, int len)
-{
- sparc_iommu_memory_rw(opaque, addr, buf, len, 1);
-}
/* tcx.c */
void tcx_init(DisplayState *ds, target_phys_addr_t addr, uint8_t *vram_base,
@@ -1318,17 +1303,12 @@
/* esp.c */
void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id);
void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
- void *dma_opaque, qemu_irq irq, qemu_irq *reset);
+ qemu_irq irq, qemu_dma *parent_dma, qemu_irq *reset);
/* sparc32_dma.c */
void *sparc32_dma_init(target_phys_addr_t daddr, qemu_irq parent_irq,
- void *iommu, qemu_irq **dev_irq, qemu_irq **reset);
-void ledma_memory_read(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap);
-void ledma_memory_write(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap);
-void espdma_memory_read(void *opaque, uint8_t *buf, int len);
-void espdma_memory_write(void *opaque, uint8_t *buf, int len);
+ qemu_irq **dev_irq, qemu_dma *parent_dma,
+ qemu_dma **dev_dma, int is_espdma, qemu_irq **reset);
/* cs4231.c */
void cs_init(target_phys_addr_t base, int irq, void *intctl);
Index: qemu/hw/esp.c
===================================================================
--- qemu.orig/hw/esp.c 2007-08-22 20:03:09.000000000 +0000
+++ qemu/hw/esp.c 2007-08-22 20:03:47.000000000 +0000
@@ -52,6 +52,7 @@
struct ESPState {
qemu_irq irq;
+ qemu_dma *parent_dma;
BlockDriverState **bd;
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
@@ -73,7 +74,6 @@
uint32_t dma_counter;
uint8_t *async_buf;
uint32_t async_len;
- void *dma_opaque;
};
#define STAT_DO 0x00
@@ -105,7 +105,7 @@
target = s->wregs[4] & 7;
DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
if (s->dma) {
- espdma_memory_read(s->dma_opaque, buf, dmalen);
+ dma_memory_read(s->parent_dma, 0, buf, dmalen);
} else {
buf[0] = 0;
memcpy(&buf[1], s->ti_buf, dmalen);
@@ -189,7 +189,7 @@
s->ti_buf[0] = s->sense;
s->ti_buf[1] = 0;
if (s->dma) {
- espdma_memory_write(s->dma_opaque, s->ti_buf, 2);
+ dma_memory_write(s->parent_dma, 0, s->ti_buf, 2);
s->rregs[4] = STAT_IN | STAT_TC | STAT_ST;
s->rregs[5] = INTR_BS | INTR_FC;
s->rregs[6] = SEQ_CD;
@@ -222,7 +222,7 @@
len = s->dma_left;
if (s->do_cmd) {
DPRINTF("command len %d + %d\n", s->cmdlen, len);
- espdma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
+ dma_memory_read(s->parent_dma, 0, &s->cmdbuf[s->cmdlen], len);
s->ti_size = 0;
s->cmdlen = 0;
s->do_cmd = 0;
@@ -237,9 +237,9 @@
len = s->async_len;
}
if (to_device) {
- espdma_memory_read(s->dma_opaque, s->async_buf, len);
+ dma_memory_read(s->parent_dma, 0, s->async_buf, len);
} else {
- espdma_memory_write(s->dma_opaque, s->async_buf, len);
+ dma_memory_write(s->parent_dma, 0, s->async_buf, len);
}
s->dma_left -= len;
s->async_buf += len;
@@ -575,7 +575,7 @@
}
void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
- void *dma_opaque, qemu_irq irq, qemu_irq *reset)
+ qemu_irq irq, qemu_dma *parent_dma, qemu_irq *reset)
{
ESPState *s;
int esp_io_memory;
@@ -586,7 +586,7 @@
s->bd = bd;
s->irq = irq;
- s->dma_opaque = dma_opaque;
+ s->parent_dma = parent_dma;
esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
cpu_register_physical_memory(espaddr, ESP_SIZE, esp_io_memory);
Index: qemu/hw/pcnet.c
===================================================================
--- qemu.orig/hw/pcnet.c 2007-08-22 20:03:09.000000000 +0000
+++ qemu/hw/pcnet.c 2007-08-22 20:03:47.000000000 +0000
@@ -73,6 +73,7 @@
void (*phys_mem_write)(void *dma_opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int do_bswap);
void *dma_opaque;
+ qemu_dma *parent_dma;
};
struct qemu_ether_header {
@@ -2015,6 +2016,46 @@
pcnet_h_reset(opaque);
}
+/* Note: on sparc, the lance 16 bit bus is swapped */
+static void ledma_memory_read(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ int i;
+
+ if (do_bswap) {
+ dma_memory_read(opaque, addr, buf, len);
+ } else {
+ dma_memory_read(opaque, addr, buf, len);
+ for(i = 0; i < len; i += 2) {
+ bswap16s((uint16_t *)(buf + i));
+ }
+ }
+}
+
+static void ledma_memory_write(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ int l, i;
+ uint16_t tmp_buf[32];
+
+ if (do_bswap) {
+ dma_memory_write(opaque, addr, buf, len);
+ } else {
+ while (len > 0) {
+ l = len;
+ if (l > sizeof(tmp_buf))
+ l = sizeof(tmp_buf);
+ for(i = 0; i < l; i += 2) {
+ tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
+ }
+ dma_memory_write(opaque, addr, (uint8_t *)tmp_buf, l);
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ }
+}
+
static void lance_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
@@ -2050,8 +2091,8 @@
lance_mem_writew,
};
-void lance_init(NICInfo *nd, target_phys_addr_t leaddr, void *dma_opaque,
- qemu_irq irq, qemu_irq *reset)
+void lance_init(NICInfo *nd, target_phys_addr_t leaddr, qemu_irq irq,
+ qemu_dma *parent_dma, qemu_irq *reset)
{
PCNetState *d;
int lance_io_memory;
@@ -2063,7 +2104,7 @@
lance_io_memory =
cpu_register_io_memory(0, lance_mem_read, lance_mem_write, d);
- d->dma_opaque = dma_opaque;
+ d->dma_opaque = parent_dma;
*reset = *qemu_allocate_irqs(parent_lance_reset, d, 1);
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-24 19:40 ` Blue Swirl
@ 2007-08-24 20:18 ` Paul Brook
2007-08-24 23:33 ` Fabrice Bellard
0 siblings, 1 reply; 20+ messages in thread
From: Paul Brook @ 2007-08-24 20:18 UTC (permalink / raw)
To: qemu-devel; +Cc: Blue Swirl
On Friday 24 August 2007, Blue Swirl wrote:
> I have now converted the ISA DMA devices (SB16, FDC), most PCI devices
> and targets.
>
> gdma.diff: Generic DMA
> pc_ppc_dma_to_gdma.diff: Convert x86 and PPC to GDMA
> pc_sb16_to_gdma.diff: Convert SB16 to GDMA
> pc_fdc_to_gdma.diff: FDC
> pc_dma_cleanup.diff: Remove unused functions
> sparc_gdma.diff: Convert Sparc32 to GDMA
> sparc32_dma_esp_le_to_gdma.diff: Convert ESP and Lance
> sun4c.diff: Preliminary Sun4c (Sparcstation-1) support
> pci_gdma.diff: Convert PCI devices and targets
>
> Any comments? The patches are a bit intrusive and I can't test the
> targets except that they compile.
Shouldn't the PCI DMA object be a property of the PCI bus?
ie. we don't want/need to pass it round as a separate parameter. It can be
inferred from the device/bus.
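A rough sketch of that shape (the dma field on PCIBus and the pci_dma_*
helpers below are hypothetical, not part of the posted patches):

    /* PCIBus grows a qemu_dma pointer, set once by the host bridge,
       e.g. in pci_register_bus()/pci_apb_init():
           struct PCIBus { ...; qemu_dma *dma; };                        */

    static inline void pci_dma_read(PCIDevice *d, target_phys_addr_t addr,
                                    uint8_t *buf, int len)
    {
        dma_memory_read(d->bus->dma, addr, buf, len);
    }

    static inline void pci_dma_write(PCIDevice *d, target_phys_addr_t addr,
                                     uint8_t *buf, int len)
    {
        dma_memory_write(d->bus->dma, addr, buf, len);
    }

Device models would then call pci_dma_read(&d->dev, addr, buf, len) instead of
taking a qemu_dma *parent_dma argument in every init function.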
Paul
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-24 20:18 ` Paul Brook
@ 2007-08-24 23:33 ` Fabrice Bellard
2007-08-25 0:29 ` Paul Brook
0 siblings, 1 reply; 20+ messages in thread
From: Fabrice Bellard @ 2007-08-24 23:33 UTC (permalink / raw)
To: qemu-devel
Paul Brook wrote:
> On Friday 24 August 2007, Blue Swirl wrote:
>> I have now converted the ISA DMA devices (SB16, FDC), most PCI devices
>> and targets.
>>
>> gdma.diff: Generic DMA
>> pc_ppc_dma_to_gdma.diff: Convert x86 and PPC to GDMA
>> pc_sb16_to_gdma.diff: Convert SB16 to GDMA
>> pc_fdc_to_gdma.diff: FDC
>> pc_dma_cleanup.diff: Remove unused functions
>> sparc_gdma.diff: Convert Sparc32 to GDMA
>> sparc32_dma_esp_le_to_gdma.diff: Convert ESP and Lance
>> sun4c.diff: Preliminary Sun4c (Sparcstation-1) support
>> pci_gdma.diff: Convert PCI devices and targets
>>
>> Any comments? The patches are a bit intrusive and I can't test the
>> targets except that they compile.
>
> Shouldn't the PCI DMA object be a property of the PCI bus?
> ie. we don't want/need to pass it round as a separate parameter. It can be
> inferred from the device/bus.
I agree. Moreover the DMA is bus specific so I don't see a need to add a
generic DMA layer.
Regards,
Fabrice.
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-24 23:33 ` Fabrice Bellard
@ 2007-08-25 0:29 ` Paul Brook
2007-08-26 11:30 ` Fabrice Bellard
0 siblings, 1 reply; 20+ messages in thread
From: Paul Brook @ 2007-08-25 0:29 UTC (permalink / raw)
To: qemu-devel
> >> pci_gdma.diff: Convert PCI devices and targets
> >>
> >> Any comments? The patches are a bit intrusive and I can't test the
> >> targets except that they compile.
> >
> > Shouldn't the PCI DMA object be a property of the PCI bus?
> > ie. we don't want/need to pass it round as a separate parameter. It can
> > be inferred from the device/bus.
>
> I agree. Moreover the DMA is bus specific so I don't see a need to add a
> generic DMA layer.
I can see use for a generic DMA interface. It has some nice possibilities for
devices which can connect via a variety of busses and maybe for layering
different busses within a system.
However I don't know how well this will work in practice for the machines qemu
currently emulates.
Paul
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-25 0:29 ` Paul Brook
@ 2007-08-26 11:30 ` Fabrice Bellard
2007-08-26 17:54 ` Blue Swirl
0 siblings, 1 reply; 20+ messages in thread
From: Fabrice Bellard @ 2007-08-26 11:30 UTC (permalink / raw)
To: qemu-devel
Paul Brook wrote:
>>>> pci_gdma.diff: Convert PCI devices and targets
>>>>
>>>> Any comments? The patches are a bit intrusive and I can't test the
>>>> targets except that they compile.
>>> Shouldn't the PCI DMA object be a property of the PCI bus?
>>> ie. we don't want/need to pass it round as a separate parameter. It can
>>> be inferred from the device/bus.
>> I agree. Moreover the DMA is bus specific so I don't see a need to add a
>> generic DMA layer.
>
> I can see use for a generic DMA interface. It has some nice possibilities for
> devices which can connect via a variety of busses and maybe for layering
> different busses within a system.
>
> However I don't know how well this will work in practice for the machines qemu
> currently emulates.
I can see more uses for a simple bus interface which could be used at
least for ISA devices. The API should include bus read/write functions
(which can be used to implement DMA) and functions to allocate/free a
memory region as we have for the CPU bus.
Of course the same must be added for PCI buses so that the PCI memory
area can be mapped at any position in the CPU address space.
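One possible shape for such an interface, with purely illustrative names
(BusState, bus_read/bus_write and bus_register_region do not exist in the
tree):

    typedef void (*bus_rw_fn)(void *opaque, target_phys_addr_t addr,
                              uint8_t *buf, int len, int is_write);

    typedef struct BusState {
        bus_rw_fn rw;   /* device-initiated access, usable to implement DMA */
        void *opaque;   /* bus private state: IOMMU, host bridge, ...       */
    } BusState;

    static inline void bus_read(BusState *bus, target_phys_addr_t addr,
                                uint8_t *buf, int len)
    {
        bus->rw(bus->opaque, addr, buf, len, 0);
    }

    static inline void bus_write(BusState *bus, target_phys_addr_t addr,
                                 uint8_t *buf, int len)
    {
        bus->rw(bus->opaque, addr, buf, len, 1);
    }

    /* mapping a window of the bus into the parent address space would
       mirror cpu_register_physical_memory()                             */
    void bus_register_region(BusState *bus, target_phys_addr_t bus_addr,
                             unsigned long size, unsigned long phys_offset);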
Fabrice.
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-26 11:30 ` Fabrice Bellard
@ 2007-08-26 17:54 ` Blue Swirl
2007-08-28 19:03 ` Blue Swirl
0 siblings, 1 reply; 20+ messages in thread
From: Blue Swirl @ 2007-08-26 17:54 UTC (permalink / raw)
To: qemu-devel, fabrice, Paul Brook
On 8/26/07, Fabrice Bellard <fabrice@bellard.org> wrote:
> Paul Brook wrote:
> >>>> pci_gdma.diff: Convert PCI devices and targets
> >>>>
> >>>> Any comments? The patches are a bit intrusive and I can't test the
> >>>> targets except that they compile.
> >>> Shouldn't the PCI DMA object be a property of the PCI bus?
> >>> ie. we don't want/need to pass it round as a separate parameter. It can
> >>> be inferred from the device/bus.
> >> I agree. Moreover the DMA is bus specific so I don't see a need to add a
> >> generic DMA layer.
> >
> > I can see use for a generic DMA interface. It has some nice possibilities for
> > devices which can connect via a variety of busses and maybe for layering
> > different busses within a system.
> >
> > However I don't know how well this will work in practice for the machines qemu
> > currently emulates.
>
> I can see more uses for a simple bus interface which could be used at
> least for ISA devices. The API should include bus read/write functions
> (which can be used to implement DMA) and functions to allocate/free a
> memory region as we have for the CPU bus.
>
> Of course the same must be added for PCI buses so that the PCI memory
> area can be mapped at any position in the CPU address space.
Nice idea. The functions in exec.c could be made more generic by
extending them with an object parameter, for example
int cpu_register_io_memory(int io_index,
CPUReadMemoryFunc **mem_read,
CPUWriteMemoryFunc **mem_write,
void *opaque)
would become
int bus_register_io_memory(void *opaque,
int io_index,
CPUReadMemoryFunc **mem_read,
CPUWriteMemoryFunc **mem_write,
void *io_opaque)
The opaque object would be struct PCIBus for PCI devices, something
other for ISA. The ISA DMA DREQ signal is still a problem, or could we
use qemu_irq for that too?
Ideally the devices would not know too much about the bus they are in.
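As a purely illustrative usage (bus_register_io_memory is only the proposal
above, and the other names are made up), a PCI device would then register its
MMIO handlers against the bus it sits on rather than against the CPU:

    /* in some PCI device init function */
    io = bus_register_io_memory(d->bus,     /* opaque: struct PCIBus *      */
                                0,          /* io_index: allocate a new one */
                                mmio_read,  /* CPUReadMemoryFunc *[3]       */
                                mmio_write, /* CPUWriteMemoryFunc *[3]      */
                                s);         /* io_opaque: device state      */
    /* the mapping itself would presumably go through a bus-level
       equivalent of cpu_register_physical_memory() */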
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-26 17:54 ` Blue Swirl
@ 2007-08-28 19:03 ` Blue Swirl
2007-08-28 19:43 ` Paul Brook
0 siblings, 1 reply; 20+ messages in thread
From: Blue Swirl @ 2007-08-28 19:03 UTC (permalink / raw)
To: qemu-devel, fabrice, Paul Brook
On 8/26/07, Blue Swirl <blauwirbel@gmail.com> wrote:
> On 8/26/07, Fabrice Bellard <fabrice@bellard.org> wrote:
> > Paul Brook wrote:
> > >>>> pci_gdma.diff: Convert PCI devices and targets
> > >>>>
> > >>>> Any comments? The patches are a bit intrusive and I can't test the
> > >>>> targets except that they compile.
> > >>> Shouldn't the PCI DMA object be a property of the PCI bus?
> > >>> ie. we don't want/need to pass it round as a separate parameter. It can
> > >>> be inferred from the device/bus.
> > >> I agree. Moreover the DMA is bus specific so I don't see a need to add a
> > >> generic DMA layer.
> > >
> > > I can see use for a generic DMA interface. It has some nice possibilities for
> > > devices which can connect via a variety of busses and maybe for layering
> > > different busses within a system.
> > >
> > > However I don't know how well this will work in practice for the machines qemu
> > > currently emulates.
> >
> > I can see more uses for a simple bus interface which could be used at
> > least for ISA devices. The API should include bus read/write functions
> > (which can be used to implement DMA) and functions to allocate/free a
> > memory region as we have for the CPU bus.
> >
> > Of course the same must be added for PCI buses so that the PCI memory
> > area can be mapped at any position in the CPU address space.
>
> Nice idea. The functions in exec.c could be made more generic by
> extending them with an object parameter, for example
> int cpu_register_io_memory(int io_index,
> CPUReadMemoryFunc **mem_read,
> CPUWriteMemoryFunc **mem_write,
> void *opaque)
> would become
> int bus_register_io_memory(void *opaque,
> int io_index,
> CPUReadMemoryFunc **mem_read,
> CPUWriteMemoryFunc **mem_write,
> void *io_opaque)
>
> The opaque object would be struct PCIBus for PCI devices, something
> other for ISA. The ISA DMA DREQ signal is still a problem, or could we
> use qemu_irq for that too?
On second thought, there is a huge difference between a write access
originating from CPU destined for the device and the device writing to
main memory. The CPU address could be 0xf000 1000, which may translate
to a bus address of 0x1000, as an example. The device could write to
main memory using the same bus address 0x1000, but this time the IOMMU
would map this to for example 0x1234 5000, or without an IOMMU it
would be just 0x1000.
So for the CPU physical address to bus address translation, my
proposal would work. But the scheme does not cover the device-to-memory
direction, which was the original problem.
The devices on the same bus could access each other as well. How would
these accesses be identified, so that they are not device-to-memory
accesses?
Maybe I have missed something important?
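A sketch of how the two directions could be kept apart (names and numbers are
only illustrative, iommu_translate is hypothetical, and page crossing is
ignored):

    /* CPU -> device: set up once at board init, e.g.
       cpu_register_physical_memory(0xf0001000, size, io_index);
       the device handler only ever sees the bus offset 0x1000.          */

    /* device -> memory: resolved per transfer by whoever owns the IOMMU */
    static void iommu_dma_rw(void *opaque, target_phys_addr_t bus_addr,
                             uint8_t *buf, int len, int is_write)
    {
        IOMMUState *s = opaque;
        target_phys_addr_t pa = iommu_translate(s, bus_addr);
        /* e.g. bus 0x1000 -> phys 0x12345000, or identity without an IOMMU */
        cpu_physical_memory_rw(pa, buf, len, is_write);
    }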
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-28 19:03 ` Blue Swirl
@ 2007-08-28 19:43 ` Paul Brook
2007-08-29 17:00 ` Blue Swirl
0 siblings, 1 reply; 20+ messages in thread
From: Paul Brook @ 2007-08-28 19:43 UTC (permalink / raw)
To: qemu-devel; +Cc: Blue Swirl
> On second thought, there is a huge difference between a write access
> originating from CPU destined for the device and the device writing to
> main memory. The CPU address could be 0xf000 1000, which may translate
> to a bus address of 0x1000, as an example. The device could write to
> main memory using the same bus address 0x1000, but this time the IOMMU
> would map it to, for example, 0x1234 5000; without an IOMMU it
> would be just 0x1000.
While your concern is valid, your example is not.
You can't have the same bus address mapping onto both a device and main
memory. Your example works if e.g. IO bus address 0x2000 1000 (or worse still
0xf000 1000) maps onto system memory 0x1234 5000.
Conceptually you can have a separate IOMMU on every bus-bus or bus/host
bridge, with asymmetric mappings depending on where the transaction originates.
I believe some of the newer POWER machines can do this (x86 hardware with this
capability is not generally available). The ARM PCI host bridge allows
asymmetric mappings, though these are simple regions rather than a full IOMMU,
and is currently not implemented.
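For the simple-regions case the asymmetry boils down to two independent windows per bridge, one per direction. A rough sketch, with invented names rather than existing QEMU code:

#include <stdint.h>

/* Illustration only: one linear window per direction; a full IOMMU
   would replace 'offset' with a page table lookup on the inbound side. */
typedef struct BridgeWindow {
    uint64_t base;      /* start of the window in the source space */
    uint64_t size;
    int64_t  offset;    /* added when crossing into the target space */
} BridgeWindow;

typedef struct Bridge {
    BridgeWindow outbound;  /* CPU -> bus (programmed I/O) */
    BridgeWindow inbound;   /* bus -> CPU/memory (device DMA) */
} Bridge;

/* Translate an address across one window; returns 0 on success. */
static int bridge_xlate(const BridgeWindow *w, uint64_t addr, uint64_t *out)
{
    if (addr < w->base || addr - w->base >= w->size)
        return -1;      /* not claimed by this window */
    *out = addr + w->offset;
    return 0;
}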
Paul
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-28 19:43 ` Paul Brook
@ 2007-08-29 17:00 ` Blue Swirl
2007-08-29 20:39 ` Paul Brook
0 siblings, 1 reply; 20+ messages in thread
From: Blue Swirl @ 2007-08-29 17:00 UTC (permalink / raw)
To: Paul Brook, qemu-devel
On 8/28/07, Paul Brook <paul@codesourcery.com> wrote:
> > On second thought, there is a huge difference between a write access
> > originating from the CPU and destined for the device, and the device writing to
> > main memory. The CPU address could be 0xf000 1000, which may translate
> > to a bus address of 0x1000, as an example. The device could write to
> > main memory using the same bus address 0x1000, but this time the IOMMU
> > would map it to, for example, 0x1234 5000; without an IOMMU it
> > would be just 0x1000.
>
> While your concern is valid, your example is not.
>
> You can't have the same bus address mapping onto both a device and main
> memory. Your example works if e.g. IO bus address 0x2000 1000 (or worse still
> 0xf000 1000) maps onto system memory 0x1234 5000.
This is a bit mysterious for me too. The SBus address space is 28 bits
(256MB). Usually each slot maps to a different area, so the CPU sees
one slot for example at 0x3000 0000 and another at 0x4000 0000.
The IOMMU can map at most 2G of memory, usually a 32 or 64MB region. For
the devices, this direct virtual memory access (DVMA) space exists at the
top of the address space (for example 0xfc00 0000). Each page can map to a
different address. But these mappings cannot be seen from the CPU; for
example the boot PROM is located at 0xffd0 0000. I wonder how the
devices access the DVMA space in the case of >256M DVMA.
The device obviously can't supply address bits 28-31; I don't know
where they come from (=1?). But from tracing Linux I'm pretty sure
that the bus address can be 0 regardless of the higher bits, and also
that the device (or more likely the device FCode PROM) can exist at that
location. How? Maybe the IOMMU does not see CPU accesses at all and the
devices see neither each other nor themselves, so it's not really a
shared bus?
> Conceptually you can have a separate IOMMU on every bus-bus or bus/host
> bridge, with asymmetric mappings depending where the transaction originates.
The IOMMU on Sun4m maps DVMA addresses to physical addresses, which (I
think) in turn can be another device's registers or memory, but the
mappings are the same for all devices.
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-29 17:00 ` Blue Swirl
@ 2007-08-29 20:39 ` Paul Brook
2007-08-29 21:18 ` Paul Brook
0 siblings, 1 reply; 20+ messages in thread
From: Paul Brook @ 2007-08-29 20:39 UTC (permalink / raw)
To: qemu-devel; +Cc: Blue Swirl
> This is a bit mysterious for me too. The SBus address space is 28 bits
> (256MB). Usually each slot maps to a different area, so the CPU sees
> one slot for example at 0x3000 0000 and another at 0x4000 0000.
>
> The IOMMU can map at most 2G of memory, usually a 32 or 64MB region. For
> the devices, this direct virtual memory access (DVMA) space exists at the
> top of the address space (for example 0xfc00 0000). Each page can map to a
> different address. But these mappings cannot be seen from the CPU; for
> example the boot PROM is located at 0xffd0 0000. I wonder how the
> devices access the DVMA space in the case of >256M DVMA.
>
> The device obviously can't supply address bits 28-31; I don't know
> where they come from (=1?). But from tracing Linux I'm pretty sure
> that the bus address can be 0 regardless of the higher bits, and also
> that the device (or more likely the device FCode PROM) can exist at that
> location. How? Maybe the IOMMU does not see CPU accesses at all and the
> devices see neither each other nor themselves, so it's not really a
> shared bus?
I can't find a copy of the SBus specification, so I'm guessing how this fits
together.
The key bit is that the SBus controller performs device selection, cf. PCI and
ISA where each device does full address decoding.
What information I've found indicates that SBus supports an unlimited number
of slave devices, and master devices use a 32-bit virtual address space.
This leads me to the conclusion that it's as if each slave device is on its
own 28-bit bus, and the SBus devices' master transactions go via the IOMMU
onto the CPU bus. From there they may be routed back to an SBus device.
An actual implementation may need to do some short-circuiting to prevent
deadlock, so I'm not entirely sure about this.
If this is the case, it means we don't need anything complicated. Devices map
themselves straight into the system address space at the appropriate slot
address (no plug-n-play to worry about), and device "DMA" goes via the IOMMU.
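If so, the per-transfer work in the emulator could stay small: split the DMA at page boundaries, ask the IOMMU for each translation, then touch physical memory. A sketch with invented callback types, only to show the flow:

#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Hypothetical hooks: IOMMU translation and physical memory access. */
typedef phys_addr_t (*iommu_xlate_fn)(void *iommu, uint32_t dvma_addr);
typedef void (*phys_rw_fn)(phys_addr_t addr, uint8_t *buf,
                           int len, int is_write);

/* Device "DMA": translate page by page through the IOMMU, then access
   physical memory. No address decoding happens here; the slot was
   decoded when the machine mapped the device. */
static void device_dma_rw(void *iommu, iommu_xlate_fn xlate,
                          phys_rw_fn phys_rw, uint32_t dvma_addr,
                          uint8_t *buf, int len, int is_write)
{
    while (len > 0) {
        int chunk = 4096 - (dvma_addr & 4095);  /* stay within one page */

        if (chunk > len)
            chunk = len;
        phys_rw(xlate(iommu, dvma_addr), buf, chunk, is_write);
        dvma_addr += chunk;
        buf += chunk;
        len -= chunk;
    }
}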
Because devices do not do address decoding I suspect this isn't going to
nicely fit into a generic bus framework that would work for most systems.
Paul
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-29 20:39 ` Paul Brook
@ 2007-08-29 21:18 ` Paul Brook
2007-09-08 14:07 ` Blue Swirl
0 siblings, 1 reply; 20+ messages in thread
From: Paul Brook @ 2007-08-29 21:18 UTC (permalink / raw)
To: qemu-devel; +Cc: Blue Swirl
> If this is the case, it means we don't need anything complicated. Devices
> map themselves straight into the system address space at the appropriate
> slot address (no plug-n-play to worry about), and device "DMA" goes via the
> IOMMU.
Further searching via Google suggests I may be wrong.
The alternative is that the controller maps the 32-bit VA onto a device
select+28-bit address, using some as-yet undiscovered mechanism.
There are then a couple of different options for how the CPU/memory bus is
accessed:
a) The IOMMU is one or more slave devices, that feed the 28-bit address
possibly plus a few other bits from the device ID into the translation table.
This effectively allows you to map a proportion of the SBus 32-bit master VA
space onto CPU address space via the IOMMU, and map the remainder onto
devices on the same bus. For a system with <=8 slots per bus a fixed mapping
using the first 2G as 256MB for each slot and the top 2G for the IOMMU is
entirely feasible.
b) The 32-bit SBus VA is looked up directly in the IOMMU. Each IOMMU entry
can refer to either a CPU address, or a device+28-bit address on the local
SBUS.
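For what it's worth, with the fixed split of option (a) the master-side decode would be almost trivial; an illustrative sketch (the function and the exact split are hypothetical):

#include <stdint.h>

/* Illustration only: low 2G selects one of 8 slots (256MB each),
   top 2G is handed to the IOMMU. */
static void sbus_master_decode(uint32_t va, int *slot, uint32_t *addr,
                               int *via_iommu)
{
    if (va & 0x80000000u) {
        *via_iommu = 1;              /* top 2G: translated by the IOMMU */
        *slot = -1;
        *addr = va;                  /* full VA handed to the IOMMU */
    } else {
        *via_iommu = 0;              /* low 2G: direct 28-bit slave access */
        *slot = va >> 28;            /* 0..7 */
        *addr = va & 0x0fffffff;
    }
}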
Paul
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-08-29 21:18 ` Paul Brook
@ 2007-09-08 14:07 ` Blue Swirl
2007-09-08 14:31 ` Paul Brook
0 siblings, 1 reply; 20+ messages in thread
From: Blue Swirl @ 2007-09-08 14:07 UTC (permalink / raw)
To: Paul Brook; +Cc: qemu-devel
On 8/30/07, Paul Brook <paul@codesourcery.com> wrote:
> > If this is the case, it means we don't need anything complicated. Devices
> > map themselves straight into the system address space at the appropriate
> > slot address (no plug-n-play to worry about), and device "DMA" goes via the
> > IOMMU.
>
> Further searching via Google suggests I may be wrong.
>
> The alternative is that the controller maps the 32-bit VA onto a device
> select+28-bit address, using some as-yet undiscovered mechanism.
> There are then a couple of different options for how the CPU/memory bus is
> accessed:
> a) The IOMMU is one or more slave devices, that feed the 28-bit address
> possibly plus a few other bits from the device ID into the translation table.
> This effectively allows you to map a proportion of the SBus 32-bit master VA
> space onto CPU address space via the IOMMU, and map the remainder onto
> devices on the same bus. For a system with <=8 slots per bus a fixed mapping
> using the first 2G as 256MB for each slot and the top 2G for the IOMMU is
> entirely feasible.
> b) The 32-bit SBus VA is looked up directly in the IOMMU. Each IOMMU entry
> can refer to either a CPU address, or a device+28-bit address on the local SBUS.
From DMA2.txt, NCR89C100.txt, NCR89C105.txt and turbosparc.pdf I
gather the following:
- CPU and IOMMU always perform slave accesses
- Slave accesses use the 28-bit address bus to select the device
- Slave accesses are not translated by IOMMU
- NCR master devices (Lance, ESP) use an internal DREQ-style signal to
indicate their need for DMA to their DMA controller
- Master accesses use the 32-bit SBus data signals for both address and data
- DMA controller is the master for NCR89C100+NCR89C105 combination
- Master accesses are translated and controlled by IOMMU
- Slave devices may or may not support master access cycles (not
supported in the NCR case)
- IOMMU can give direct bus access for "intelligent masters" (no devices known)
We could model this using two buses: A slave bus between the CPU and
the devices, and a master bus between devices and IOMMU. The slave bus
translates the 36-bit CPU/memory bus addresses to 28-bit SBus bus
addresses. The master bus uses IOMMU to translate 32-bit DVMA
addresses to 36-bit CPU/memory bus addresses. Slave devices are
connected to the slave bus and DREQs. Master devices and DMA
controllers take the DREQs and both buses. Devices register the
address ranges they serve on each bus.
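To sketch the shape of that model -- nothing below exists, the names and signatures are invented:

#include <stdint.h>

/* Illustration only. */
typedef struct SlaveBus SlaveBus;    /* CPU -> devices, 28-bit addresses */
typedef struct MasterBus MasterBus;  /* devices -> IOMMU -> CPU/memory   */

typedef struct SBusDevice {
    /* slave side: MMIO handler for the ranges registered below */
    void (*mmio_rw)(void *opaque, uint32_t sbus_addr, uint8_t *buf,
                    int len, int is_write);
    /* master devices additionally keep a handle to the master bus
       and a DREQ line towards their DMA controller */
    MasterBus *dma_bus;
    void *opaque;
} SBusDevice;

/* The slave bus translates 36-bit CPU/memory addresses to 28-bit SBus
   addresses and selects the device by slot; the master bus pushes every
   access through the IOMMU before it reaches the CPU/memory bus. */
void slave_bus_register(SlaveBus *bus, int slot, uint32_t base,
                        uint32_t size, SBusDevice *dev);
void master_bus_dma_rw(MasterBus *bus, uint32_t dvma_addr, uint8_t *buf,
                       int len, int is_write);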
On Sun4c (without IOMMU) there would be just one bus for both purposes
(with the MMU quirk).
For the Sparc64 PCI bus which has an IOMMU, a similar dual bus
arrangement would be needed. On PC/PPC systems the two buses would be
again one.
Maybe even the IO port access and MMIO could be unified (one generic
bus for both)? That could simplify the device registration.
Alternatively, the generic bus could support several access modes, so
there would be no need for many virtual buses.
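The access-mode variant could be as small as an extra argument on the handler; again just an illustration, not existing code:

#include <stdint.h>

/* Illustration only: one registration and access path for both MMIO
   and port I/O, selected by an access mode instead of separate
   virtual buses. */
typedef enum {
    BUS_ACCESS_MMIO,
    BUS_ACCESS_PIO
} bus_access_mode;

typedef void (*bus_rw_fn)(void *opaque, bus_access_mode mode,
                          uint64_t addr, uint8_t *buf,
                          int len, int is_write);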
Comments?
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-09-08 14:07 ` Blue Swirl
@ 2007-09-08 14:31 ` Paul Brook
2007-09-08 14:53 ` Blue Swirl
0 siblings, 1 reply; 20+ messages in thread
From: Paul Brook @ 2007-09-08 14:31 UTC (permalink / raw)
To: Blue Swirl; +Cc: qemu-devel
> From DMA2.txt, NCR89C100.txt, NCR89C105.txt and turbosparc.pdf I
> gather the following:
> - CPU and IOMMU always perform slave accesses
> - Slave accesses use the 28-bit address bus to select the device
I thought device selection was separate from the 28-bit SBus slave address
space. ie. each device has exclusive ownership of the whole 28-bit address
space, and it's effectively just multiplexing per-slave busses over a single
electrical connection.
> - Slave accesses are not translated by IOMMU
> - NCR master devices (Lance, ESP) use an internal DREQ-style signal to
> indicate their need for DMA to their DMA controller
> - Master accesses use the 32-bit SBus data signals for both address and data
> - DMA controller is the master for NCR89C100+NCR89C105 combination
> - Master accesses are translated and controlled by IOMMU
> - Slave devices may or may not support master access cycles (not
> supported in the NCR case)
> - IOMMU can give direct bus access for "intelligent masters" (no devices
> known)
>
> We could model this using two buses: A slave bus between the CPU and
> the devices, and a master bus between devices and IOMMU. The slave bus
> translates the 36-bit CPU/memory bus addresses to 28-bit SBus bus
> addresses. The master bus uses IOMMU to translate 32-bit DVMA
> addresses to 36-bit CPU/memory bus addresses. Slave devices are
> connected to the slave bus and DREQs. Master devices and DMA
> controllers take the DREQs and both buses. Devices register the
> address ranges they serve on each bus.
IIUC devices never register addresses on the master bus. The only thing that
responds on that bus is the IOMMU.
> On Sun4c (without IOMMU) there would be just one bus for both purposes
> (with the MMU quirk).
>
> For the Sparc64 PCI bus which has an IOMMU, a similar dual bus
> arrangement would be needed. On PC/PPC systems the two buses would be
> again one.
PCI shouldn't need a dual bus setup. You just have one bus for PCI and one bus
for CPU/memory.
IMHO the whole point of having a generic bus infrastructure is that we can
define address mapping in terms of [asymmetric] translations from one bus
address space to another. This isolates the device from needing to care about
bridges and IOMMUs.
If we're assuming 1:1 or symmetric address space mapping there doesn't seem
much point modelling separate busses. Instead push everything into the device
registration and DMA routines.
Paul
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-09-08 14:31 ` Paul Brook
@ 2007-09-08 14:53 ` Blue Swirl
2007-09-08 16:03 ` Paul Brook
0 siblings, 1 reply; 20+ messages in thread
From: Blue Swirl @ 2007-09-08 14:53 UTC (permalink / raw)
To: Paul Brook; +Cc: qemu-devel
On 9/8/07, Paul Brook <paul@codesourcery.com> wrote:
> > From DMA2.txt, NCR89C100.txt, NCR89C105.txt and turbosparc.pdf I
> > gather the following:
> > - CPU and IOMMU always perform slave accesses
> > - Slave accesses use the 28-bit address bus to select the device
>
> I thought device selection was separate from the 28-bit SBus slave address
> space. ie. each device has exclusive ownership of the whole 28-bit address
> space, and it's effectively just multiplexing per-slave busses over a single
> electrical connection.
At least the NCR slave devices use SBus bus signals for device select.
I don't know whether this applies generally.
> > - Slave accesses are not translated by IOMMU
> > - NCR master devices (Lance, ESP) use an internal DREQ-style signal to
> > indicate their need for DMA to their DMA controller
> > - Master accesses use the 32-bit SBus data signals for both address and data
> > - DMA controller is the master for NCR89C100+NCR89C105 combination
> > - Master accesses are translated and controlled by IOMMU
> > - Slave devices may or may not support master access cycles (not
> > supported in the NCR case)
> > - IOMMU can give direct bus access for "intelligent masters" (no devices
> > known)
> >
> > We could model this using two buses: A slave bus between the CPU and
> > the devices, and a master bus between devices and IOMMU. The slave bus
> > translates the 36-bit CPU/memory bus addresses to 28-bit SBus bus
> > addresses. The master bus uses IOMMU to translate 32-bit DVMA
> > addresses to 36-bit CPU/memory bus addresses. Slave devices are
> > connected to the slave bus and DREQs. Master devices and DMA
> > controllers take the DREQs and both buses. Devices register the
> > address ranges they serve on each bus.
>
> IIUC devices never register addresses on the master bus. The only thing that
> responds on that bus is the IOMMU.
Generally yes, but these "intelligent masters" and their targets would
register on both buses. The only case I can think of is a
video grabber; its frame memory could be accessed directly by other
IO devices.
> > On Sun4c (without IOMMU) there would be just one bus for both purposes
> > (with the MMU quirk).
> >
> > For the Sparc64 PCI bus which has an IOMMU, a similar dual bus
> > arrangement would be needed. On PC/PPC systems the two buses would be
> > again one.
>
> PCI shouldn't need a dual bus setup. You just have one bus for PCI and one bus
> for CPU/memory.
Then how would the Sparc64 IOMMU intercept the device DMA? I'd think that
PCI bus mastering works similarly to SBus, or doesn't it?
> IMHO the whole point of having a generic bus infrastructure is that we can
> define address mapping in terms of [asymmetric] translations from one bus
> address space to another. This isolates the device from needing to care about
> bridges and IOMMUs.
>
> If we're assuming 1:1 or symmetric address space mapping there doesn't seem
> much point modelling separate busses. Instead push everything into the device
> registration and DMA routines.
Agreed. For the Sparc32/64 cases there isn't much choice; there is no symmetry.
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-09-08 14:53 ` Blue Swirl
@ 2007-09-08 16:03 ` Paul Brook
2007-09-15 16:16 ` Blue Swirl
0 siblings, 1 reply; 20+ messages in thread
From: Paul Brook @ 2007-09-08 16:03 UTC (permalink / raw)
To: Blue Swirl; +Cc: qemu-devel
> > IIUC devices never register addresses on the master bus. The only thing
> > that responds on that bus is the IOMMU.
>
> Generally yes, but these "intelligent masters" and their targets would
> register on both buses. The only case I can think of is a
> video grabber; its frame memory could be accessed directly by other
> IO devices.
Ah, I think we've got different interpretations of what "registering" a device
involves. To a first approximation a master doesn't need to register with a
bus at all. Slave devices can't/don't need to identify which master initiated
the transaction. Plus all bus transactions are atomic w.r.t. other bus
traffic, so no arbitration is needed.
Masters will need to register with a bus insofar as they need to get a
reference to identify which bus they're talking to. They don't generally
reserve any bus address space though. Most devices are actually dual function
master/slave devices, so already have a bus handle from registering the slave
device.
> > > For the Sparc64 PCI bus which has an IOMMU, a similar dual bus
> > > arrangement would be needed. On PC/PPC systems the two buses would be
> > > again one.
> >
> > PCI shouldn't need a dual bus setup. You just have one bus for PCI and
> > one bus for CPU/memory.
>
> Then how would the Sparc64 IOMMU intercept the device DMA? I'd think that
> PCI bus mastering works similarly to SBus, or doesn't it?
A PCI host controller effectively consists of two bridges.
The CPU->PCI bridge responds to requests on the CPU bus, using simple linear
address translation to create PCI requests. The PCI->CPU bridge responds to
requests on the PCI bus (ie. device DMA), using an IOMMU to translate these
into CPU requests.
The interesting bits of a generic bus infrastructure are the bridges between
the busses, not the busses themselves.
Conceptually each access starts on the bus local to that device (the system
bus for the CPU, PCI bus for device DMA, etc), then recursively walks bus-bus
bridges until it finds a device. Walking over a bridge is what causes address
translation, and that translation is sensitive to direction.
I admit I haven't figured out how to implement this efficiently.
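A completely naive version of that walk, only to pin the concept down -- the types are invented and it ignores cycles and performance:

#include <stdint.h>

typedef struct Bus Bus;

typedef struct Bridge {
    Bus *target;                  /* bus on the other side */
    /* direction-sensitive translation: returns 0 and fills *out
       if this bridge claims the address */
    int (*xlate)(void *opaque, uint64_t addr, uint64_t *out);
    void *opaque;
} Bridge;

struct Bus {
    /* try the devices/memory on this bus first; returns 0 when handled */
    int (*local_rw)(void *opaque, uint64_t addr, uint8_t *buf,
                    int len, int is_write);
    void *opaque;
    Bridge *bridges;
    int nbridges;
};

/* Start on the bus local to the initiator (CPU or DMA master) and
   recursively cross bridges, translating the address at every hop. */
static int bus_rw(Bus *bus, uint64_t addr, uint8_t *buf,
                  int len, int is_write)
{
    uint64_t taddr;
    int i;

    if (bus->local_rw(bus->opaque, addr, buf, len, is_write) == 0)
        return 0;
    for (i = 0; i < bus->nbridges; i++) {
        Bridge *br = &bus->bridges[i];

        if (br->xlate(br->opaque, addr, &taddr) == 0)
            return bus_rw(br->target, taddr, buf, len, is_write);
    }
    return -1;                    /* unassigned access */
}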
Paul
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [Qemu-devel] Re: PATCH, RFC: Generic DMA framework
2007-09-08 16:03 ` Paul Brook
@ 2007-09-15 16:16 ` Blue Swirl
0 siblings, 0 replies; 20+ messages in thread
From: Blue Swirl @ 2007-09-15 16:16 UTC (permalink / raw)
To: Paul Brook; +Cc: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 226 bytes --]
I made a first implementation of this concept. CPU->bus uses
"southbound" functions, device->CPU "northbound" ones.
The system is not symmetric; the device address range allocation could
well be separate.
What do you think?
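For illustration, a board could wire the attached functions up roughly as below; the demo_* names and addresses are made up, only the bus_* calls and the QEMU types come from the patch and the existing headers:

/* Stub device callbacks, QEMU style (one entry per access size). */
static uint32_t demo_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                               /* stub register read */
}

static void demo_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* stub register write */
}

static CPUReadMemoryFunc *demo_read[3] = {
    demo_readl, demo_readl, demo_readl
};
static CPUWriteMemoryFunc *demo_write[3] = {
    demo_writel, demo_writel, demo_writel
};

/* "Northbound" handler: device DMA ends up here; a machine with an
   IOMMU would translate addr first, this one goes straight to RAM. */
static void demo_north_rw(void *opaque, target_phys_addr_t addr,
                          uint8_t *buf, unsigned int len, int is_write)
{
    cpu_physical_memory_rw(addr, buf, len, is_write);
}

static void demo_board_init(void)
{
    qemu_bus *bus = bus_init(28, demo_north_rw, NULL);  /* 28-bit bus */
    int io = bus_register_io_memory(bus, 0, demo_read, demo_write, NULL);

    /* map 4K of device registers at bus address 0x01000000 (southbound);
       device DMA would then use bus_read_north()/bus_write_north() */
    bus_register_physical_memory(bus, 0x01000000, 0x1000, io);
}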
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: bus_ops.diff --]
[-- Type: text/x-diff; name="bus_ops.diff", Size: 11704 bytes --]
Index: qemu/cpu-all.h
===================================================================
--- qemu.orig/cpu-all.h 2007-09-15 15:34:51.000000000 +0000
+++ qemu/cpu-all.h 2007-09-15 16:05:04.000000000 +0000
@@ -861,6 +861,54 @@
void dump_exec_info(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
+/* Bus operations */
+typedef struct qemu_bus qemu_bus;
+
+typedef void (*qemu_mem_rw_handler)(void *opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, unsigned int len,
+ int is_write);
+
+qemu_bus *bus_init(unsigned int bus_bits, qemu_mem_rw_handler handler,
+ void *handler_opaque);
+void bus_register_physical_memory(qemu_bus *bus,
+ target_phys_addr_t start_addr,
+ unsigned long size,
+ unsigned long phys_offset);
+int bus_register_io_memory(qemu_bus *bus,
+ int io_index,
+ CPUReadMemoryFunc **mem_read,
+ CPUWriteMemoryFunc **mem_write,
+ void *opaque);
+/* Direction CPU->bridge->device/memory */
+void bus_rw_south(qemu_bus *bus, target_phys_addr_t addr, uint8_t *buf,
+ unsigned int len, int is_write);
+static inline void bus_read_south(qemu_bus *bus, target_phys_addr_t addr,
+ uint8_t *buf, unsigned int len)
+{
+ bus_rw_south(bus, addr, buf, len, 0);
+}
+static inline void bus_write_south(qemu_bus *bus, target_phys_addr_t addr,
+ const uint8_t *buf, unsigned int len)
+{
+ bus_rw_south(bus, addr, (uint8_t *)buf, len, 1);
+}
+void bus_write_south_rom(qemu_bus *bus, target_phys_addr_t addr,
+ const uint8_t *buf, unsigned int len);
+/* From device towards CPU/memory (DMA) */
+void bus_rw_north(qemu_bus *bus, target_phys_addr_t addr, uint8_t *buf,
+ unsigned int len, int is_write);
+static inline void bus_read_north(qemu_bus *bus, target_phys_addr_t addr,
+ uint8_t *buf, unsigned int len)
+{
+ bus_rw_north(bus, addr, buf, len, 0);
+}
+static inline void bus_write_north(qemu_bus *bus, target_phys_addr_t addr,
+ const uint8_t *buf, unsigned int len)
+{
+ bus_rw_north(bus, addr, (uint8_t *)buf, len, 1);
+}
+
/*******************************************/
/* host CPU ticks (if available) */
Index: qemu/exec.c
===================================================================
--- qemu.orig/exec.c 2007-09-15 15:34:51.000000000 +0000
+++ qemu/exec.c 2007-09-15 16:05:54.000000000 +0000
@@ -2905,3 +2905,258 @@
#undef env
#endif
+
+typedef struct BusPageDesc {
+ /* offset in host memory of the page + io_index in the low 12 bits */
+ target_phys_addr_t phys_offset;
+} BusPageDesc;
+
+struct qemu_bus {
+ /* Northbound access handler */
+ qemu_mem_rw_handler north_handler;
+ void *handler_opaque;
+ /* Southbound access management */
+ BusPageDesc **l1_bus_map;
+ unsigned int bus_bits, l1_bits, l2_bits, l1_size, l2_size;
+ CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
+ CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+ void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+ unsigned int io_mem_nb;
+};
+
+static BusPageDesc *bus_page_find_alloc(qemu_bus *bus,
+ target_phys_addr_t index, int alloc)
+{
+ void **lp, **p;
+ BusPageDesc *pd;
+
+ p = (void **)bus->l1_bus_map;
+#if TARGET_PHYS_ADDR_SPACE_BITS > 32
+ lp = p + ((index >> (bus->l1_bits + bus->l2_bits)) & (bus->l1_size - 1));
+ p = *lp;
+ if (!p) {
+ /* allocate if not found */
+ if (!alloc)
+ return NULL;
+ p = qemu_vmalloc(sizeof(void *) * bus->l1_size);
+ memset(p, 0, sizeof(void *) * bus->l1_size);
+ *lp = p;
+ }
+#endif
+ lp = p + ((index >> bus->l2_bits) & (bus->l1_size - 1));
+ pd = *lp;
+ if (!pd) {
+ unsigned int i;
+ /* allocate if not found */
+ if (!alloc)
+ return NULL;
+ pd = qemu_vmalloc(sizeof(BusPageDesc) * bus->l2_size);
+ *lp = pd;
+ for (i = 0; i < bus->l2_size; i++)
+ pd[i].phys_offset = IO_MEM_UNASSIGNED;
+ }
+ return ((BusPageDesc *)pd) + (index & (bus->l2_size - 1));
+}
+
+static inline BusPageDesc *bus_page_find(qemu_bus *bus,
+ target_phys_addr_t index)
+{
+ return bus_page_find_alloc(bus, index, 0);
+}
+
+int bus_register_io_memory(qemu_bus *bus,
+ int io_index,
+ CPUReadMemoryFunc **mem_read,
+ CPUWriteMemoryFunc **mem_write,
+ void *opaque)
+{
+ unsigned int i;
+
+ if (io_index <= 0) {
+ if (bus->io_mem_nb >= IO_MEM_NB_ENTRIES)
+ return -1;
+ io_index = bus->io_mem_nb++;
+ } else {
+ if (io_index >= IO_MEM_NB_ENTRIES)
+ return -1;
+ }
+
+ for(i = 0; i < 3; i++) {
+ bus->io_mem_read[io_index][i] = mem_read[i];
+ bus->io_mem_write[io_index][i] = mem_write[i];
+ }
+ bus->io_mem_opaque[io_index] = opaque;
+ return io_index << IO_MEM_SHIFT;
+}
+
+void bus_rw_south(qemu_bus *bus, target_phys_addr_t addr, uint8_t *buf,
+ unsigned int len, int is_write)
+{
+ int l, io_index;
+ uint8_t *ptr;
+ uint32_t val;
+ target_phys_addr_t page;
+ unsigned long pd;
+ BusPageDesc *p;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ p = bus_page_find(bus, page >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if (is_write) {
+ if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ /* XXX: could force cpu_single_env to NULL to avoid
+ potential bugs */
+ if (l >= 4 && ((addr & 3) == 0)) {
+ /* 32 bit write access */
+ val = ldl_p(buf);
+ bus->io_mem_write[io_index][2](bus->io_mem_opaque[io_index],
+ addr, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+ /* 16 bit write access */
+ val = lduw_p(buf);
+ bus->io_mem_write[io_index][1](bus->io_mem_opaque[io_index],
+ addr, val);
+ l = 2;
+ } else {
+ /* 8 bit write access */
+ val = ldub_p(buf);
+ bus->io_mem_write[io_index][0](bus->io_mem_opaque[io_index],
+ addr, val);
+ l = 1;
+ }
+ } else {
+ unsigned long addr1;
+
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ /* RAM case */
+ ptr = phys_ram_base + addr1;
+ memcpy(ptr, buf, l);
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+ /* set dirty bit */
+ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+ (0xff & ~CODE_DIRTY_FLAG);
+ }
+ }
+ } else {
+ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+ !(pd & IO_MEM_ROMD)) {
+ /* I/O case */
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (l >= 4 && ((addr & 3) == 0)) {
+ /* 32 bit read access */
+ val = bus->io_mem_read[io_index][2](bus->io_mem_opaque[io_index],
+ addr);
+ stl_p(buf, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+ /* 16 bit read access */
+ val = bus->io_mem_read[io_index][1](bus->io_mem_opaque[io_index],
+ addr);
+ stw_p(buf, val);
+ l = 2;
+ } else {
+ /* 8 bit read access */
+ val = bus->io_mem_read[io_index][0](bus->io_mem_opaque[io_index],
+ addr);
+ stb_p(buf, val);
+ l = 1;
+ }
+ } else {
+ /* RAM case */
+ ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ memcpy(buf, ptr, l);
+ }
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+}
+
+/* used for ROM loading : can write in RAM and ROM */
+void bus_write_south_rom(qemu_bus *bus, target_phys_addr_t addr,
+ const uint8_t *buf, unsigned int len)
+{
+ int l;
+ uint8_t *ptr;
+ target_phys_addr_t page;
+ unsigned long pd;
+ BusPageDesc *p;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ p = bus_page_find(bus, page >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
+ (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
+ !(pd & IO_MEM_ROMD)) {
+ /* do nothing */
+ } else {
+ unsigned long addr1;
+
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ /* ROM/RAM case */
+ ptr = phys_ram_base + addr1;
+ memcpy(ptr, buf, l);
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+}
+
+void bus_rw_north(qemu_bus *bus, target_phys_addr_t addr, uint8_t *buf,
+ unsigned int len, int is_write)
+{
+ bus->north_handler(bus->handler_opaque, addr, buf, len, is_write);
+}
+
+qemu_bus *bus_init(unsigned int bus_bits, qemu_mem_rw_handler north_handler,
+ void *handler_opaque)
+{
+ qemu_bus *bus;
+
+ bus = qemu_mallocz(sizeof(qemu_bus));
+ bus->north_handler = north_handler;
+ bus->bus_bits = bus_bits;
+ bus->l2_bits = 10;
+ bus->l1_bits = bus->bus_bits - bus->l2_bits - TARGET_PAGE_BITS;
+ bus->l1_size = (1 << bus->l1_bits);
+ bus->l2_size = (1 << bus->l2_bits);
+ bus->l1_bus_map = qemu_vmalloc(bus->l1_size * sizeof(void *));
+ memset(bus->l1_bus_map, 0, bus->l1_size * sizeof(void *));
+ bus->handler_opaque = handler_opaque;
+
+ /* Register default types */
+ bus_register_io_memory(bus, IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read,
+ unassigned_mem_write, NULL);
+ bus_register_io_memory(bus, IO_MEM_UNASSIGNED >> IO_MEM_SHIFT,
+ unassigned_mem_read, unassigned_mem_write, NULL);
+ bus_register_io_memory(bus, IO_MEM_NOTDIRTY >> IO_MEM_SHIFT,
+ error_mem_read, notdirty_mem_write, NULL);
+ bus->io_mem_nb = 5;
+
+ return bus;
+}
^ permalink raw reply [flat|nested] 20+ messages in thread
end of thread, other threads:[~2007-09-15 16:16 UTC | newest]
Thread overview: 20+ messages
2007-08-14 19:48 [Qemu-devel] PATCH, RFC: Generic DMA framework Blue Swirl
2007-08-16 18:18 ` [Qemu-devel] " Blue Swirl
2007-08-16 19:58 ` malc
2007-08-19 17:46 ` Blue Swirl
2007-08-24 19:40 ` Blue Swirl
2007-08-24 20:18 ` Paul Brook
2007-08-24 23:33 ` Fabrice Bellard
2007-08-25 0:29 ` Paul Brook
2007-08-26 11:30 ` Fabrice Bellard
2007-08-26 17:54 ` Blue Swirl
2007-08-28 19:03 ` Blue Swirl
2007-08-28 19:43 ` Paul Brook
2007-08-29 17:00 ` Blue Swirl
2007-08-29 20:39 ` Paul Brook
2007-08-29 21:18 ` Paul Brook
2007-09-08 14:07 ` Blue Swirl
2007-09-08 14:31 ` Paul Brook
2007-09-08 14:53 ` Blue Swirl
2007-09-08 16:03 ` Paul Brook
2007-09-15 16:16 ` Blue Swirl