From: Jason Wang <jasowang@redhat.com>
To: qemu-devel@nongnu.org, peter.maydell@linaro.org
Cc: Akihiko Odaki <akihiko.odaki@daynix.com>,
	Jason Wang <jasowang@redhat.com>
Subject: [PULL 03/50] hw/net/net_tx_pkt: Decouple interface from PCI
Date: Tue, 23 May 2023 15:31:51 +0800	[thread overview]
Message-ID: <20230523073238.54236-4-jasowang@redhat.com> (raw)
In-Reply-To: <20230523073238.54236-1-jasowang@redhat.com>

From: Akihiko Odaki <akihiko.odaki@daynix.com>

This makes it possible to use the network packet abstractions even when
PCI is not used.
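
For illustration, a minimal sketch (not part of this patch) of how a
non-PCI device could use the decoupled interface: the caller maps each
fragment itself and hands a matching free callback to net_tx_pkt_reset().
The MyDevState type, its dma_as field and the surrounding wiring are
hypothetical.

    #include "qemu/osdep.h"
    #include "sysemu/dma.h"
    #include "net_tx_pkt.h"

    /* Matches NetTxPktFreeFrag: context is the AddressSpace the
     * fragment was mapped from. */
    static void my_dev_unmap_frag(void *context, void *base, size_t len)
    {
        dma_memory_unmap(context, base, len, DMA_DIRECTION_TO_DEVICE, 0);
    }

    static void my_dev_queue_frag(MyDevState *s, dma_addr_t pa, size_t len)
    {
        dma_addr_t mapped_len = len;
        void *base = dma_memory_map(&s->dma_as, pa, &mapped_len,
                                    DMA_DIRECTION_TO_DEVICE,
                                    MEMTXATTRS_UNSPECIFIED);

        if (!base) {
            s->skip_current_tx_pkt = true;
            return;
        }

        if (mapped_len != len ||
            !net_tx_pkt_add_raw_fragment(s->tx_pkt, base, mapped_len)) {
            my_dev_unmap_frag(&s->dma_as, base, mapped_len);
            s->skip_current_tx_pkt = true;
        }
    }

    static void my_dev_finish_packet(MyDevState *s)
    {
        /* Frees all queued fragments via the callback, then clears state. */
        net_tx_pkt_reset(s->tx_pkt, my_dev_unmap_frag, &s->dma_as);
    }

PCI devices keep the old behaviour by pairing
net_tx_pkt_add_raw_fragment_pci() with net_tx_pkt_unmap_frag_pci(), as
the converted callers below show.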

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/e1000e_core.c | 13 ++++++++-----
 hw/net/igb_core.c    | 13 ++++++-------
 hw/net/net_tx_pkt.c  | 36 +++++++++++++-----------------------
 hw/net/net_tx_pkt.h  | 31 ++++++++++++++++++++-----------
 hw/net/vmxnet3.c     | 14 +++++++-------
 5 files changed, 54 insertions(+), 53 deletions(-)

diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index cfa3f55..15821a7 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -746,7 +746,8 @@ e1000e_process_tx_desc(E1000ECore *core,
     addr = le64_to_cpu(dp->buffer_addr);
 
     if (!tx->skip_cp) {
-        if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
+        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, core->owner,
+                                             addr, split_size)) {
             tx->skip_cp = true;
         }
     }
@@ -764,7 +765,7 @@ e1000e_process_tx_desc(E1000ECore *core,
         }
 
         tx->skip_cp = false;
-        net_tx_pkt_reset(tx->tx_pkt, core->owner);
+        net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner);
 
         tx->sum_needed = 0;
         tx->cptse = 0;
@@ -3421,7 +3422,7 @@ e1000e_core_pci_realize(E1000ECore     *core,
         qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);
 
     for (i = 0; i < E1000E_NUM_QUEUES; i++) {
-        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
+        net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
     }
 
     net_rx_pkt_init(&core->rx_pkt);
@@ -3446,7 +3447,8 @@ e1000e_core_pci_uninit(E1000ECore *core)
     qemu_del_vm_change_state_handler(core->vmstate);
 
     for (i = 0; i < E1000E_NUM_QUEUES; i++) {
-        net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
+        net_tx_pkt_reset(core->tx[i].tx_pkt,
+                         net_tx_pkt_unmap_frag_pci, core->owner);
         net_tx_pkt_uninit(core->tx[i].tx_pkt);
     }
 
@@ -3571,7 +3573,8 @@ static void e1000e_reset(E1000ECore *core, bool sw)
     e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
 
     for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
-        net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
+        net_tx_pkt_reset(core->tx[i].tx_pkt,
+                         net_tx_pkt_unmap_frag_pci, core->owner);
         memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
         core->tx[i].skip_cp = false;
     }
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 826e7a6..abfdce9 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -597,7 +597,8 @@ igb_process_tx_desc(IGBCore *core,
     length = cmd_type_len & 0xFFFF;
 
     if (!tx->skip_cp) {
-        if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, buffer_addr, length)) {
+        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev,
+                                             buffer_addr, length)) {
             tx->skip_cp = true;
         }
     }
@@ -616,7 +617,7 @@ igb_process_tx_desc(IGBCore *core,
 
         tx->first = true;
         tx->skip_cp = false;
-        net_tx_pkt_reset(tx->tx_pkt, dev);
+        net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev);
     }
 }
 
@@ -842,8 +843,6 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
         d = core->owner;
     }
 
-    net_tx_pkt_reset(txr->tx->tx_pkt, d);
-
     while (!igb_ring_empty(core, txi)) {
         base = igb_ring_head_descr(core, txi);
 
@@ -861,6 +860,8 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
         core->mac[EICR] |= eic;
         igb_set_interrupt_cause(core, E1000_ICR_TXDW);
     }
+
+    net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d);
 }
 
 static uint32_t
@@ -3954,7 +3955,7 @@ igb_core_pci_realize(IGBCore        *core,
     core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
 
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS);
+        net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
     }
 
     net_rx_pkt_init(&core->rx_pkt);
@@ -3979,7 +3980,6 @@ igb_core_pci_uninit(IGBCore *core)
     qemu_del_vm_change_state_handler(core->vmstate);
 
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        net_tx_pkt_reset(core->tx[i].tx_pkt, NULL);
         net_tx_pkt_uninit(core->tx[i].tx_pkt);
     }
 
@@ -4158,7 +4158,6 @@ static void igb_reset(IGBCore *core, bool sw)
 
     for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
         tx = &core->tx[i];
-        net_tx_pkt_reset(tx->tx_pkt, NULL);
         memset(tx->ctx, 0, sizeof(tx->ctx));
         tx->first = true;
         tx->skip_cp = false;
diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
index aca12ff..cc36750 100644
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -16,12 +16,12 @@
  */
 
 #include "qemu/osdep.h"
-#include "net_tx_pkt.h"
 #include "net/eth.h"
 #include "net/checksum.h"
 #include "net/tap.h"
 #include "net/net.h"
 #include "hw/pci/pci_device.h"
+#include "net_tx_pkt.h"
 
 enum {
     NET_TX_PKT_VHDR_FRAG = 0,
@@ -32,8 +32,6 @@ enum {
 
 /* TX packet private context */
 struct NetTxPkt {
-    PCIDevice *pci_dev;
-
     struct virtio_net_hdr virt_hdr;
 
     struct iovec *raw;
@@ -59,13 +57,10 @@ struct NetTxPkt {
     uint8_t l4proto;
 };
 
-void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
-    uint32_t max_frags)
+void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags)
 {
     struct NetTxPkt *p = g_malloc0(sizeof *p);
 
-    p->pci_dev = pci_dev;
-
     p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
 
     p->raw = g_new(struct iovec, max_frags);
@@ -384,8 +379,7 @@ void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
     }
 }
 
-static bool net_tx_pkt_add_raw_fragment_common(struct NetTxPkt *pkt,
-                                               void *base, size_t len)
+bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len)
 {
     struct iovec *ventry;
     assert(pkt);
@@ -433,7 +427,8 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt)
 #endif
 }
 
-void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
+void net_tx_pkt_reset(struct NetTxPkt *pkt,
+                      NetTxPktFreeFrag callback, void *context)
 {
     int i;
 
@@ -453,12 +448,9 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
         assert(pkt->raw);
         for (i = 0; i < pkt->raw_frags; i++) {
             assert(pkt->raw[i].iov_base);
-            net_tx_pkt_unmap_frag_pci(pkt->pci_dev,
-                                      pkt->raw[i].iov_base,
-                                      pkt->raw[i].iov_len);
+            callback(context, pkt->raw[i].iov_base, pkt->raw[i].iov_len);
         }
     }
-    pkt->pci_dev = pci_dev;
     pkt->raw_frags = 0;
 
     pkt->hdr_len = 0;
@@ -470,19 +462,17 @@ void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len)
     pci_dma_unmap(context, base, len, DMA_DIRECTION_TO_DEVICE, 0);
 }
 
-bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
-    size_t len)
+bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev,
+                                     dma_addr_t pa, size_t len)
 {
     dma_addr_t mapped_len = len;
-    void *base = pci_dma_map(pkt->pci_dev, pa, &mapped_len,
-                             DMA_DIRECTION_TO_DEVICE);
+    void *base = pci_dma_map(pci_dev, pa, &mapped_len, DMA_DIRECTION_TO_DEVICE);
     if (!base) {
         return false;
     }
 
-    if (mapped_len != len ||
-        !net_tx_pkt_add_raw_fragment_common(pkt, base, len)) {
-        net_tx_pkt_unmap_frag_pci(pkt->pci_dev, base, mapped_len);
+    if (mapped_len != len || !net_tx_pkt_add_raw_fragment(pkt, base, len)) {
+        net_tx_pkt_unmap_frag_pci(pci_dev, base, mapped_len);
         return false;
     }
 
@@ -710,7 +700,7 @@ static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt,
 }
 
 static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
-                                           NetTxPktCallback callback,
+                                           NetTxPktSend callback,
                                            void *context)
 {
     uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
@@ -807,7 +797,7 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
 }
 
 bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
-                            NetTxPktCallback callback, void *context)
+                            NetTxPktSend callback, void *context)
 {
     assert(pkt);
 
diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h
index 5eb123e..4d7233e 100644
--- a/hw/net/net_tx_pkt.h
+++ b/hw/net/net_tx_pkt.h
@@ -26,17 +26,16 @@
 
 struct NetTxPkt;
 
-typedef void (* NetTxPktCallback)(void *, const struct iovec *, int, const struct iovec *, int);
+typedef void (*NetTxPktFreeFrag)(void *, void *, size_t);
+typedef void (*NetTxPktSend)(void *, const struct iovec *, int, const struct iovec *, int);
 
 /**
  * Init function for tx packet functionality
  *
  * @pkt:            packet pointer
- * @pci_dev:        PCI device processing this packet
  * @max_frags:      max tx ip fragments
  */
-void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
-    uint32_t max_frags);
+void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags);
 
 /**
  * Clean all tx packet resources.
@@ -95,12 +94,11 @@ net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan)
  * populate data fragment into pkt context.
  *
  * @pkt:            packet
- * @pa:             physical address of fragment
+ * @base:           pointer to fragment
  * @len:            length of fragment
  *
  */
-bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
-    size_t len);
+bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len);
 
 /**
  * Fix ip header fields and calculate IP header and pseudo header checksums.
@@ -148,10 +146,11 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt);
  * reset tx packet private context (needed to be called between packets)
  *
  * @pkt:            packet
- * @dev:            PCI device processing the next packet
- *
+ * @callback:       function to free the fragments
+ * @context:        pointer to be passed to the callback
  */
-void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev);
+void net_tx_pkt_reset(struct NetTxPkt *pkt,
+                      NetTxPktFreeFrag callback, void *context);
 
 /**
  * Unmap a fragment mapped from a PCI device.
@@ -163,6 +162,16 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev);
 void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len);
 
 /**
+ * map data fragment from PCI device and populate it into pkt context.
+ *
+ * @pci_dev:        PCI device owning fragment
+ * @pa:             physical address of fragment
+ * @len:            length of fragment
+ */
+bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev,
+                                     dma_addr_t pa, size_t len);
+
+/**
  * Send packet to qemu. handles sw offloads if vhdr is not supported.
  *
  * @pkt:            packet
@@ -182,7 +191,7 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc);
  * @ret:            operation result
  */
 bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
-                            NetTxPktCallback callback, void *context);
+                            NetTxPktSend callback, void *context);
 
 /**
  * parse raw packet data and analyze offload requirements.
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index f7b874c..9acff31 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -651,9 +651,8 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
             data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
             data_pa = txd.addr;
 
-            if (!net_tx_pkt_add_raw_fragment(s->tx_pkt,
-                                                data_pa,
-                                                data_len)) {
+            if (!net_tx_pkt_add_raw_fragment_pci(s->tx_pkt, PCI_DEVICE(s),
+                                                 data_pa, data_len)) {
                 s->skip_current_tx_pkt = true;
             }
         }
@@ -678,7 +677,8 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
             vmxnet3_complete_packet(s, qidx, txd_idx);
             s->tx_sop = true;
             s->skip_current_tx_pkt = false;
-            net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
+            net_tx_pkt_reset(s->tx_pkt,
+                             net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s));
         }
     }
 }
@@ -1159,7 +1159,7 @@ static void vmxnet3_deactivate_device(VMXNET3State *s)
 {
     if (s->device_active) {
         VMW_CBPRN("Deactivating vmxnet3...");
-        net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
+        net_tx_pkt_reset(s->tx_pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s));
         net_tx_pkt_uninit(s->tx_pkt);
         net_rx_pkt_uninit(s->rx_pkt);
         s->device_active = false;
@@ -1519,7 +1519,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
     /* Preallocate TX packet wrapper */
     VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
-    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
+    net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags);
     net_rx_pkt_init(&s->rx_pkt);
 
     /* Read rings memory locations for RX queues */
@@ -2399,7 +2399,7 @@ static int vmxnet3_post_load(void *opaque, int version_id)
 {
     VMXNET3State *s = opaque;
 
-    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
+    net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags);
     net_rx_pkt_init(&s->rx_pkt);
 
     if (s->msix_used) {
-- 
2.7.4



Thread overview: 55+ messages
2023-05-23  7:31 [PULL 00/50] Net patches Jason Wang
2023-05-23  7:31 ` [PULL 01/50] e1000e: Fix tx/rx counters Jason Wang
2023-05-23  7:31 ` [PULL 02/50] hw/net/net_tx_pkt: Decouple implementation from PCI Jason Wang
2023-05-23  7:31 ` Jason Wang [this message]
2023-05-23  7:31 ` [PULL 04/50] e1000x: Fix BPRC and MPRC Jason Wang
2023-05-23  7:31 ` [PULL 05/50] igb: Fix Rx packet type encoding Jason Wang
2023-05-23  7:31 ` [PULL 06/50] igb: Do not require CTRL.VME for tx VLAN tagging Jason Wang
2023-05-23  7:31 ` [PULL 07/50] igb: Clear IMS bits when committing ICR access Jason Wang
2023-05-23  7:31 ` [PULL 08/50] net/net_rx_pkt: Use iovec for net_rx_pkt_set_protocols() Jason Wang
2023-05-23  7:31 ` [PULL 09/50] e1000e: Always copy ethernet header Jason Wang
2023-05-23  7:31 ` [PULL 10/50] igb: " Jason Wang
2023-05-23  7:31 ` [PULL 11/50] Fix references to igb Avocado test Jason Wang
2023-05-23  7:32 ` [PULL 12/50] tests/avocado: Remove unused imports Jason Wang
2023-05-23  7:32 ` [PULL 13/50] tests/avocado: Remove test_igb_nomsi_kvm Jason Wang
2023-05-23  7:32 ` [PULL 14/50] hw/net/net_tx_pkt: Remove net_rx_pkt_get_l4_info Jason Wang
2023-05-23  7:32 ` [PULL 15/50] net/eth: Rename eth_setup_vlan_headers_ex Jason Wang
2023-05-23  7:32 ` [PULL 16/50] e1000x: Share more Rx filtering logic Jason Wang
2023-05-23  7:32 ` [PULL 17/50] e1000x: Take CRC into consideration for size check Jason Wang
2023-05-23  7:32 ` [PULL 18/50] e1000x: Rename TcpIpv6 into TcpIpv6Ex Jason Wang
2023-05-23  7:32 ` [PULL 19/50] e1000e: Always log status after building rx metadata Jason Wang
2023-05-23  7:32 ` [PULL 20/50] igb: " Jason Wang
2023-05-23  7:32 ` [PULL 21/50] igb: Remove goto Jason Wang
2023-05-23  7:32 ` [PULL 22/50] igb: Read DCMD.VLE of the first Tx descriptor Jason Wang
2023-05-23  7:32 ` [PULL 23/50] e1000e: Reset packet state after emptying Tx queue Jason Wang
2023-05-23  7:32 ` [PULL 24/50] vmxnet3: " Jason Wang
2023-05-23  7:32 ` [PULL 25/50] igb: Add more definitions for Tx descriptor Jason Wang
2023-05-23  7:32 ` [PULL 26/50] igb: Share common VF constants Jason Wang
2023-05-23  7:32 ` [PULL 27/50] igb: Fix igb_mac_reg_init coding style alignment Jason Wang
2023-05-23  7:32 ` [PULL 28/50] igb: Clear EICR bits for delayed MSI-X interrupts Jason Wang
2023-05-23  7:32 ` [PULL 29/50] e1000e: Rename a variable in e1000e_receive_internal() Jason Wang
2023-05-23  7:32 ` [PULL 30/50] igb: Rename a variable in igb_receive_internal() Jason Wang
2023-05-23  7:32 ` [PULL 31/50] net/eth: Use void pointers Jason Wang
2023-05-23  7:32 ` [PULL 32/50] net/eth: Always add VLAN tag Jason Wang
2023-05-23  7:32 ` [PULL 33/50] hw/net/net_rx_pkt: Enforce alignment for eth_header Jason Wang
2023-05-23  7:32 ` [PULL 34/50] tests/qtest/libqos/igb: Set GPIE.Multiple_MSIX Jason Wang
2023-05-23  7:32 ` [PULL 35/50] igb: Implement MSI-X single vector mode Jason Wang
2023-05-23  7:32 ` [PULL 36/50] igb: Use UDP for RSS hash Jason Wang
2023-05-23  7:32 ` [PULL 37/50] igb: Implement Rx SCTP CSO Jason Wang
2023-05-23  7:32 ` [PULL 38/50] igb: Implement Tx " Jason Wang
2023-05-23  7:32 ` [PULL 39/50] igb: Strip the second VLAN tag for extended VLAN Jason Wang
2023-05-23  7:32 ` [PULL 40/50] igb: Filter with " Jason Wang
2023-05-23  7:32 ` [PULL 41/50] igb: Implement igb-specific oversize check Jason Wang
2023-05-23  7:32 ` [PULL 42/50] igb: Implement Rx PTP2 timestamp Jason Wang
2023-05-23  7:32 ` [PULL 43/50] igb: Implement Tx timestamp Jason Wang
2023-05-23  7:32 ` [PULL 44/50] e1000e: Notify only new interrupts Jason Wang
2023-05-23  7:32 ` [PULL 45/50] igb: " Jason Wang
2023-05-23  7:32 ` [PULL 46/50] igb: Clear-on-read ICR when ICR.INTA is set Jason Wang
2023-05-23  7:32 ` [PULL 47/50] vmxnet3: Do not depend on PC Jason Wang
2023-05-23  7:32 ` [PULL 48/50] MAINTAINERS: Add a reviewer for network packet abstractions Jason Wang
2023-05-23  7:32 ` [PULL 49/50] docs/system/devices/igb: Note igb is tested for DPDK Jason Wang
2023-05-23  7:32 ` [PULL 50/50] rtl8139: fix large_send_mss divide-by-zero Jason Wang
2023-05-23 17:56 ` [PULL 00/50] Net patches Richard Henderson
2023-05-23 19:53 ` Michael Tokarev
2023-05-24  4:06   ` Jason Wang
2023-05-24  4:21     ` Akihiko Odaki
