From: "Blue Swirl" <blauwirbel@gmail.com>
To: qemu-devel <qemu-devel@nongnu.org>
Subject: [Qemu-devel] PATCH, RFC: Generic DMA framework
Date: Tue, 14 Aug 2007 22:48:05 +0300
Message-ID: <f43fc5580708141248l3ea425e9q5f3851f5096f1dee@mail.gmail.com>
[-- Attachment #1: Type: text/plain, Size: 215 bytes --]
Hi,
The first patch implements a simple generic DMA framework. The next
patches convert first the Sparc32 IOMMU and then ESP and Lance to use it.
Would the framework need any changes to support other targets? Comments welcome.
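To make the intended usage a bit more concrete, here is a minimal sketch
(not part of the patches themselves) of how a hypothetical device model
could be wired up with the new API. The qemu_dma type and the
qemu_init_dma/dma_memory_* helpers are the ones added to vl.h in the
first attachment; everything named mydev_* below is invented for the
example:

static void mydev_phys_dma_rw(void *opaque, target_phys_addr_t addr,
                              uint8_t *buf, int len, int is_write)
{
    /* Simplest possible handler: device addresses are physical
       addresses, exactly like sparc_dma_memory_rw() in sun4m.c. */
    cpu_physical_memory_rw(addr, buf, len, is_write);
}

typedef struct MyDevState {
    qemu_dma *parent_dma;   /* whatever channel the board hands us */
} MyDevState;

static void mydev_fetch_descriptor(MyDevState *s, target_phys_addr_t addr,
                                   uint8_t *buf, int len)
{
    /* The device neither knows nor cares whether this access goes
       straight to RAM or through an IOMMU/DMA controller chain. */
    dma_memory_read(s->parent_dma, addr, buf, len);
}

static MyDevState *mydev_init(qemu_dma *parent_dma)
{
    MyDevState *s = (MyDevState *)qemu_mallocz(sizeof(MyDevState));

    s->parent_dma = parent_dma;
    return s;
}

static MyDevState *mydev_board_init(void)
{
    /* The board decides what the device's bus-master accesses mean;
       here they simply hit physical memory. */
    qemu_dma *physical_dma = qemu_init_dma(mydev_phys_dma_rw, NULL);

    return mydev_init(physical_dma);
}

On sun4m the same pattern just nests: the machine creates physical_dma,
iommu_init() wraps it and returns dvma, sparc32_dma_init() wraps dvma and
returns esp_dvma/le_dvma, and ESP/Lance only ever see an opaque qemu_dma
pointer.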
[-- Attachment #2: gdma.diff --]
[-- Type: text/x-diff, Size: 1635 bytes --]
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-14 19:25:52.000000000 +0000
+++ qemu/vl.h 2007-08-14 19:26:34.000000000 +0000
@@ -734,6 +734,50 @@
#include "hw/irq.h"
+/* Generic DMA API */
+typedef void (*qemu_dma_handler)(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write);
+
+typedef struct QEMUDMAState {
+ qemu_dma_handler handler;
+ void *opaque;
+} qemu_dma;
+
+static inline void dma_memory_read(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, 0);
+}
+
+static inline void dma_memory_write(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, 1);
+}
+
+static inline void dma_memory_rw(qemu_dma *dma_opaque,
+ target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ if (dma_opaque)
+ dma_opaque->handler(dma_opaque->opaque, addr, buf, len, is_write);
+}
+
+static inline qemu_dma *qemu_init_dma(qemu_dma_handler handler, void *opaque)
+{
+ qemu_dma *s;
+
+ s = (qemu_dma *)qemu_mallocz(sizeof(qemu_dma));
+ s->handler = handler;
+ s->opaque = opaque;
+
+ return s;
+}
+
/* ISA bus */
extern target_phys_addr_t isa_mem_base;
[-- Attachment #3: sparc_gdma.diff --]
[-- Type: text/x-diff, Size: 3656 bytes --]
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c 2007-08-14 19:26:56.000000000 +0000
+++ qemu/hw/sun4m.c 2007-08-14 19:37:20.000000000 +0000
@@ -289,6 +289,12 @@
slavio_set_power_fail(slavio_misc, 1);
}
+static void sparc_dma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
+{
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
static void main_cpu_reset(void *opaque)
{
CPUState *env = opaque;
@@ -315,6 +321,7 @@
const sparc_def_t *def;
qemu_irq *cpu_irqs[MAX_CPUS], *slavio_irq, *slavio_cpu_irq,
*espdma_irq, *ledma_irq;
+ qemu_dma *physical_dma, *dvma;
/* init CPUs */
sparc_find_by_name(cpu_model, &def);
@@ -343,7 +350,10 @@
/* allocate RAM */
cpu_register_physical_memory(0, RAM_size, 0);
- iommu = iommu_init(hwdef->iommu_base);
+ physical_dma = qemu_init_dma(sparc_dma_memory_rw, NULL);
+
+ iommu = iommu_init(hwdef->iommu_base, physical_dma, &dvma);
+
slavio_intctl = slavio_intctl_init(hwdef->intctl_base,
hwdef->intctl_base + 0x10000ULL,
&hwdef->intbit_to_level[0],
Index: qemu/hw/iommu.c
===================================================================
--- qemu.orig/hw/iommu.c 2007-08-14 19:27:35.000000000 +0000
+++ qemu/hw/iommu.c 2007-08-14 19:36:33.000000000 +0000
@@ -104,6 +104,7 @@
target_phys_addr_t addr;
uint32_t regs[IOMMU_NREGS];
target_phys_addr_t iostart;
+ qemu_dma *parent_dma;
} IOMMUState;
static uint32_t iommu_mem_readw(void *opaque, target_phys_addr_t addr)
@@ -245,6 +246,7 @@
void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int is_write)
{
+ IOMMUState *s = opaque;
int l;
uint32_t flags;
target_phys_addr_t page, phys_addr;
@@ -265,10 +267,8 @@
iommu_bad_addr(opaque, page, is_write);
return;
}
- cpu_physical_memory_write(phys_addr, buf, len);
- } else {
- cpu_physical_memory_read(phys_addr, buf, len);
}
+ dma_memory_rw(s->parent_dma, phys_addr, buf, len, is_write);
len -= l;
buf += l;
addr += l;
@@ -309,7 +309,8 @@
s->regs[IOMMU_CTRL] = IOMMU_VERSION;
}
-void *iommu_init(target_phys_addr_t addr)
+void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
+ qemu_dma **dvma)
{
IOMMUState *s;
int iommu_io_memory;
@@ -322,7 +323,10 @@
iommu_io_memory = cpu_register_io_memory(0, iommu_mem_read, iommu_mem_write, s);
cpu_register_physical_memory(addr, IOMMU_NREGS * 4, iommu_io_memory);
-
+
+ s->parent_dma = parent_dma;
+ *dvma = qemu_init_dma(sparc_iommu_memory_rw, s);
+
register_savevm("iommu", addr, 2, iommu_save, iommu_load, s);
qemu_register_reset(iommu_reset, s);
return s;
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-14 19:27:35.000000000 +0000
+++ qemu/vl.h 2007-08-14 19:36:54.000000000 +0000
@@ -1263,7 +1263,8 @@
extern QEMUMachine ss5_machine, ss10_machine;
/* iommu.c */
-void *iommu_init(target_phys_addr_t addr);
+void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
+ qemu_dma **dvma);
void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int is_write);
static inline void sparc_iommu_memory_read(void *opaque,
[-- Attachment #4: sparc32_dma_esp_le_to_gdma.diff --]
[-- Type: text/x-diff, Size: 13719 bytes --]
Index: qemu/hw/sparc32_dma.c
===================================================================
--- qemu.orig/hw/sparc32_dma.c 2007-08-14 19:25:42.000000000 +0000
+++ qemu/hw/sparc32_dma.c 2007-08-14 19:39:11.000000000 +0000
@@ -58,63 +58,12 @@
struct DMAState {
uint32_t dmaregs[DMA_REGS];
qemu_irq irq;
- void *iommu, *dev_opaque;
+ void *dev_opaque;
void (*dev_reset)(void *dev_opaque);
qemu_irq *pic;
+ qemu_dma *dma;
};
-/* Note: on sparc, the lance 16 bit bus is swapped */
-void ledma_memory_read(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap)
-{
- DMAState *s = opaque;
- int i;
-
- DPRINTF("DMA write, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- addr |= s->dmaregs[3];
- if (do_bswap) {
- sparc_iommu_memory_read(s->iommu, addr, buf, len);
- } else {
- addr &= ~1;
- len &= ~1;
- sparc_iommu_memory_read(s->iommu, addr, buf, len);
- for(i = 0; i < len; i += 2) {
- bswap16s((uint16_t *)(buf + i));
- }
- }
-}
-
-void ledma_memory_write(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap)
-{
- DMAState *s = opaque;
- int l, i;
- uint16_t tmp_buf[32];
-
- DPRINTF("DMA read, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- addr |= s->dmaregs[3];
- if (do_bswap) {
- sparc_iommu_memory_write(s->iommu, addr, buf, len);
- } else {
- addr &= ~1;
- len &= ~1;
- while (len > 0) {
- l = len;
- if (l > sizeof(tmp_buf))
- l = sizeof(tmp_buf);
- for(i = 0; i < l; i += 2) {
- tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
- }
- sparc_iommu_memory_write(s->iommu, addr, (uint8_t *)tmp_buf, l);
- len -= l;
- buf += l;
- addr += l;
- }
- }
-}
-
static void dma_set_irq(void *opaque, int irq, int level)
{
DMAState *s = opaque;
@@ -129,24 +78,29 @@
}
}
-void espdma_memory_read(void *opaque, uint8_t *buf, int len)
+static void ledma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
{
DMAState *s = opaque;
- DPRINTF("DMA read, direction: %c, addr 0x%8.8x\n",
- s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- sparc_iommu_memory_read(s->iommu, s->dmaregs[1], buf, len);
- s->dmaregs[0] |= DMA_INTR;
- s->dmaregs[1] += len;
+ DPRINTF("DMA %s, direction: %c, addr 0x%8.8x\n",
+ is_write ? "write" : "read",
+ s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', addr);
+ addr |= s->dmaregs[3];
+ addr &= ~1;
+ len &= ~1;
+ dma_memory_rw(s->dma, addr, buf, len, is_write);
}
-void espdma_memory_write(void *opaque, uint8_t *buf, int len)
+static void espdma_memory_rw(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int is_write)
{
DMAState *s = opaque;
- DPRINTF("DMA write, direction: %c, addr 0x%8.8x\n",
+ DPRINTF("DMA %s, direction: %c, addr 0x%8.8x\n",
+ is_write ? "write" : "read",
s->dmaregs[0] & DMA_WRITE_MEM ? 'w': 'r', s->dmaregs[1]);
- sparc_iommu_memory_write(s->iommu, s->dmaregs[1], buf, len);
+ dma_memory_rw(s->dma, s->dmaregs[1], buf, len, is_write);
s->dmaregs[0] |= DMA_INTR;
s->dmaregs[1] += len;
}
@@ -238,7 +192,8 @@
}
void *sparc32_dma_init(target_phys_addr_t daddr, qemu_irq parent_irq,
- void *iommu, qemu_irq **dev_irq)
+ qemu_irq **dev_irq, qemu_dma *parent_dma,
+ qemu_dma **dev_dma, int is_espdma)
{
DMAState *s;
int dma_io_memory;
@@ -248,7 +203,7 @@
return NULL;
s->irq = parent_irq;
- s->iommu = iommu;
+ s->dma = parent_dma;
dma_io_memory = cpu_register_io_memory(0, dma_mem_read, dma_mem_write, s);
cpu_register_physical_memory(daddr, DMA_SIZE, dma_io_memory);
@@ -257,6 +212,11 @@
qemu_register_reset(dma_reset, s);
*dev_irq = qemu_allocate_irqs(dma_set_irq, s, 1);
+ if (is_espdma)
+ *dev_dma = qemu_init_dma(espdma_memory_rw, s);
+ else
+ *dev_dma = qemu_init_dma(ledma_memory_rw, s);
+
return s;
}
Index: qemu/hw/sun4m.c
===================================================================
--- qemu.orig/hw/sun4m.c 2007-08-14 19:37:20.000000000 +0000
+++ qemu/hw/sun4m.c 2007-08-14 19:39:11.000000000 +0000
@@ -321,7 +321,7 @@
const sparc_def_t *def;
qemu_irq *cpu_irqs[MAX_CPUS], *slavio_irq, *slavio_cpu_irq,
*espdma_irq, *ledma_irq;
- qemu_dma *physical_dma, *dvma;
+ qemu_dma *physical_dma, *dvma, *esp_dvma, *le_dvma;
/* init CPUs */
sparc_find_by_name(cpu_model, &def);
@@ -362,9 +362,11 @@
hwdef->clock_irq);
espdma = sparc32_dma_init(hwdef->dma_base, slavio_irq[hwdef->esp_irq],
- iommu, &espdma_irq);
+ &espdma_irq, dvma, &esp_dvma, 1);
+
ledma = sparc32_dma_init(hwdef->dma_base + 16ULL,
- slavio_irq[hwdef->le_irq], iommu, &ledma_irq);
+ slavio_irq[hwdef->le_irq], &ledma_irq, dvma,
+ &le_dvma, 0);
if (graphic_depth != 8 && graphic_depth != 24) {
fprintf(stderr, "qemu: Unsupported depth: %d\n", graphic_depth);
@@ -375,7 +377,7 @@
if (nd_table[0].model == NULL
|| strcmp(nd_table[0].model, "lance") == 0) {
- lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq);
+ lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq, le_dvma);
} else if (strcmp(nd_table[0].model, "?") == 0) {
fprintf(stderr, "qemu: Supported NICs: lance\n");
exit (1);
@@ -399,7 +401,9 @@
slavio_serial_init(hwdef->serial_base, slavio_irq[hwdef->ser_irq],
serial_hds[1], serial_hds[0]);
fdctrl_init(slavio_irq[hwdef->fd_irq], 0, 1, hwdef->fd_base, fd_table);
- main_esp = esp_init(bs_table, hwdef->esp_base, espdma, *espdma_irq);
+
+ main_esp = esp_init(bs_table, hwdef->esp_base, espdma, *espdma_irq,
+ esp_dvma);
for (i = 0; i < MAX_DISKS; i++) {
if (bs_table[i]) {
Index: qemu/vl.h
===================================================================
--- qemu.orig/vl.h 2007-08-14 19:36:54.000000000 +0000
+++ qemu/vl.h 2007-08-14 19:39:45.000000000 +0000
@@ -1098,7 +1098,7 @@
void pci_pcnet_init(PCIBus *bus, NICInfo *nd, int devfn);
void lance_init(NICInfo *nd, target_phys_addr_t leaddr, void *dma_opaque,
- qemu_irq irq);
+ qemu_irq irq, qemu_dma *parent_dma);
/* vmmouse.c */
void *vmmouse_init(void *m);
@@ -1265,21 +1265,6 @@
/* iommu.c */
void *iommu_init(target_phys_addr_t addr, qemu_dma *parent_dma,
qemu_dma **dvma);
-void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int is_write);
-static inline void sparc_iommu_memory_read(void *opaque,
- target_phys_addr_t addr,
- uint8_t *buf, int len)
-{
- sparc_iommu_memory_rw(opaque, addr, buf, len, 0);
-}
-
-static inline void sparc_iommu_memory_write(void *opaque,
- target_phys_addr_t addr,
- uint8_t *buf, int len)
-{
- sparc_iommu_memory_rw(opaque, addr, buf, len, 1);
-}
/* tcx.c */
void tcx_init(DisplayState *ds, target_phys_addr_t addr, uint8_t *vram_base,
@@ -1318,17 +1303,12 @@
/* esp.c */
void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id);
void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
- void *dma_opaque, qemu_irq irq);
+ void *dma_opaque, qemu_irq irq, qemu_dma *parent_dma);
/* sparc32_dma.c */
void *sparc32_dma_init(target_phys_addr_t daddr, qemu_irq parent_irq,
- void *iommu, qemu_irq **dev_irq);
-void ledma_memory_read(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap);
-void ledma_memory_write(void *opaque, target_phys_addr_t addr,
- uint8_t *buf, int len, int do_bswap);
-void espdma_memory_read(void *opaque, uint8_t *buf, int len);
-void espdma_memory_write(void *opaque, uint8_t *buf, int len);
+ qemu_irq **dev_irq, qemu_dma *parent_dma,
+ qemu_dma **dev_dma, int is_espdma);
void sparc32_dma_set_reset_data(void *opaque, void (*dev_reset)(void *opaque),
void *dev_opaque);
Index: qemu/hw/esp.c
===================================================================
--- qemu.orig/hw/esp.c 2007-08-14 19:25:42.000000000 +0000
+++ qemu/hw/esp.c 2007-08-14 19:39:11.000000000 +0000
@@ -52,6 +52,7 @@
struct ESPState {
qemu_irq irq;
+ qemu_dma *parent_dma;
BlockDriverState **bd;
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
@@ -73,7 +74,6 @@
uint32_t dma_counter;
uint8_t *async_buf;
uint32_t async_len;
- void *dma_opaque;
};
#define STAT_DO 0x00
@@ -105,7 +105,7 @@
target = s->wregs[4] & 7;
DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
if (s->dma) {
- espdma_memory_read(s->dma_opaque, buf, dmalen);
+ dma_memory_read(s->parent_dma, 0, buf, dmalen);
} else {
buf[0] = 0;
memcpy(&buf[1], s->ti_buf, dmalen);
@@ -189,7 +189,7 @@
s->ti_buf[0] = s->sense;
s->ti_buf[1] = 0;
if (s->dma) {
- espdma_memory_write(s->dma_opaque, s->ti_buf, 2);
+ dma_memory_write(s->parent_dma, 0, s->ti_buf, 2);
s->rregs[4] = STAT_IN | STAT_TC | STAT_ST;
s->rregs[5] = INTR_BS | INTR_FC;
s->rregs[6] = SEQ_CD;
@@ -222,7 +222,7 @@
len = s->dma_left;
if (s->do_cmd) {
DPRINTF("command len %d + %d\n", s->cmdlen, len);
- espdma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
+ dma_memory_read(s->parent_dma, 0, &s->cmdbuf[s->cmdlen], len);
s->ti_size = 0;
s->cmdlen = 0;
s->do_cmd = 0;
@@ -237,9 +237,9 @@
len = s->async_len;
}
if (to_device) {
- espdma_memory_read(s->dma_opaque, s->async_buf, len);
+ dma_memory_read(s->parent_dma, 0, s->async_buf, len);
} else {
- espdma_memory_write(s->dma_opaque, s->async_buf, len);
+ dma_memory_write(s->parent_dma, 0, s->async_buf, len);
}
s->dma_left -= len;
s->async_buf += len;
@@ -569,7 +569,7 @@
}
void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
- void *dma_opaque, qemu_irq irq)
+ void *dma_opaque, qemu_irq irq, qemu_dma *parent_dma)
{
ESPState *s;
int esp_io_memory;
@@ -580,7 +580,7 @@
s->bd = bd;
s->irq = irq;
- s->dma_opaque = dma_opaque;
+ s->parent_dma = parent_dma;
sparc32_dma_set_reset_data(dma_opaque, esp_reset, s);
esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
Index: qemu/hw/pcnet.c
===================================================================
--- qemu.orig/hw/pcnet.c 2007-08-14 19:25:42.000000000 +0000
+++ qemu/hw/pcnet.c 2007-08-14 19:39:11.000000000 +0000
@@ -75,6 +75,7 @@
void (*phys_mem_write)(void *dma_opaque, target_phys_addr_t addr,
uint8_t *buf, int len, int do_bswap);
void *dma_opaque;
+ qemu_dma *parent_dma;
};
struct qemu_ether_header {
@@ -2011,6 +2012,46 @@
#if defined (TARGET_SPARC) && !defined(TARGET_SPARC64) // Avoid compile failure
+/* Note: on sparc, the lance 16 bit bus is swapped */
+static void ledma_memory_read(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ int i;
+
+ if (do_bswap) {
+ dma_memory_read(opaque, addr, buf, len);
+ } else {
+ dma_memory_read(opaque, addr, buf, len);
+ for(i = 0; i < len; i += 2) {
+ bswap16s((uint16_t *)(buf + i));
+ }
+ }
+}
+
+static void ledma_memory_write(void *opaque, target_phys_addr_t addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ int l, i;
+ uint16_t tmp_buf[32];
+
+ if (do_bswap) {
+ dma_memory_write(opaque, addr, buf, len);
+ } else {
+ while (len > 0) {
+ l = len;
+ if (l > sizeof(tmp_buf))
+ l = sizeof(tmp_buf);
+ for(i = 0; i < l; i += 2) {
+ tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
+ }
+ dma_memory_write(opaque, addr, (uint8_t *)tmp_buf, l);
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ }
+}
+
static void lance_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
@@ -2047,7 +2088,7 @@
};
void lance_init(NICInfo *nd, target_phys_addr_t leaddr, void *dma_opaque,
- qemu_irq irq)
+ qemu_irq irq, qemu_dma *parent_dma)
{
PCNetState *d;
int lance_io_memory;
@@ -2059,7 +2100,7 @@
lance_io_memory =
cpu_register_io_memory(0, lance_mem_read, lance_mem_write, d);
- d->dma_opaque = dma_opaque;
+ d->dma_opaque = parent_dma;
sparc32_dma_set_reset_data(dma_opaque, pcnet_h_reset, d);
cpu_register_physical_memory(leaddr, 4, lance_io_memory);