From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: mark.cave-ayland@ilande.co.uk, atar4qemu@gmail.com
Subject: [PATCH 49/85] target/sparc: Split out fp ldst functions with asi precomputed
Date: Fri, 13 Oct 2023 14:28:10 -0700
Message-ID: <20231013212846.165724-50-richard.henderson@linaro.org>
In-Reply-To: <20231013212846.165724-1-richard.henderson@linaro.org>

Split gen_ldf_asi and gen_stf_asi into thin wrappers around new
gen_ldf_asi0 and gen_stf_asi0 helpers that receive the DisasASI
precomputed.  Take the operation size from the MemOp instead of a
separate parameter, and apply the address mask once in the wrappers
rather than in each case of the switch.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
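For illustration only, not part of the patch: a minimal sketch of the
calling pattern the split enables, using only names visible in the diff
below (example_lddfa is a hypothetical caller).

    /* Sketch: the wrapper resolves the ASI, encoding the access size in
       the MemOp, masks the address once, then calls the core helper,
       which recovers the size via "da->memop & MO_SIZE".  */
    static void example_lddfa(DisasContext *dc, TCGv addr, int insn, int rd)
    {
        DisasASI da = get_asi(dc, insn, MO_TEUQ);   /* 64-bit fp load */

        gen_address_mask(dc, addr);
        gen_ldf_asi0(dc, &da, addr, rd);  /* sees size == MO_64 internally */
    }
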
 target/sparc/translate.c | 136 ++++++++++++++++++++++-----------------
 1 file changed, 78 insertions(+), 58 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 02030bd99b..973b864ca0 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -2214,35 +2214,40 @@ static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
     }
 }
 
-static void __attribute__((unused))
-gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+static void gen_ldf_asi0(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
 {
-    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+    MemOp memop = da->memop;
+    MemOp size = memop & MO_SIZE;
     TCGv_i32 d32;
     TCGv_i64 d64;
 
-    switch (da.type) {
+    /* TODO: Use 128-bit load/store below. */
+    if (size == MO_128) {
+        memop = (memop & ~MO_SIZE) | MO_64;
+    }
+
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
 
     case GET_ASI_DIRECT:
-        gen_address_mask(dc, addr);
+        memop |= MO_ALIGN_4;
         switch (size) {
-        case 4:
+        case MO_32:
             d32 = gen_dest_fpr_F(dc);
-            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
             gen_store_fpr_F(dc, rd, d32);
             break;
-        case 8:
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_4);
+
+        case MO_64:
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
             break;
-        case 16:
+
+        case MO_128:
             d64 = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
+            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
             tcg_gen_addi_tl(addr, addr, 8);
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_4);
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr, da->mem_idx, memop);
             tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
             break;
         default:
@@ -2252,24 +2257,19 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_BLOCK:
         /* Valid for lddfa on aligned registers only.  */
-        if (size == 8 && (rd & 7) == 0) {
-            MemOp memop;
+        if (size == MO_64 && (rd & 7) == 0) {
             TCGv eight;
             int i;
 
-            gen_address_mask(dc, addr);
-
             /* The first operation checks required alignment.  */
-            memop = da.memop | MO_ALIGN_64;
             eight = tcg_constant_tl(8);
             for (i = 0; ; ++i) {
-                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
-                                    da.mem_idx, memop);
+                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                 if (i == 7) {
                     break;
                 }
                 tcg_gen_add_tl(addr, addr, eight);
-                memop = da.memop;
             }
         } else {
             gen_exception(dc, TT_ILL_INSN);
@@ -2278,10 +2278,9 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_SHORT:
         /* Valid for lddfa only.  */
-        if (size == 8) {
-            gen_address_mask(dc, addr);
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN);
+        if (size == MO_64) {
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -2289,8 +2288,8 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     default:
         {
-            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
+            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
             save_state(dc);
             /* According to the table in the UA2011 manual, the only
@@ -2298,21 +2297,23 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
                the NO_FAULT asis.  We still need a helper for these,
                but we can just use the integer asi helper for them.  */
             switch (size) {
-            case 4:
+            case MO_32:
                 d64 = tcg_temp_new_i64();
                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                 d32 = gen_dest_fpr_F(dc);
                 tcg_gen_extrl_i64_i32(d32, d64);
                 gen_store_fpr_F(dc, rd, d32);
                 break;
-            case 8:
-                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
+            case MO_64:
+                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
+                                  r_asi, r_mop);
                 break;
-            case 16:
+            case MO_128:
                 d64 = tcg_temp_new_i64();
                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                 tcg_gen_addi_tl(addr, addr, 8);
-                gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop);
+                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr,
+                                  r_asi, r_mop);
                 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                 break;
             default:
@@ -2324,36 +2325,51 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 }
 
 static void __attribute__((unused))
-gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 {
-    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL :
+                                     size == 8 ? MO_TEUQ : MO_TE | MO_128));
+
+    gen_address_mask(dc, addr);
+    gen_ldf_asi0(dc, &da, addr, rd);
+}
+
+static void gen_stf_asi0(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
+{
+    MemOp memop = da->memop;
+    MemOp size = memop & MO_SIZE;
     TCGv_i32 d32;
 
-    switch (da.type) {
+    /* TODO: Use 128-bit load/store below. */
+    if (size == MO_128) {
+        memop = (memop & ~MO_SIZE) | MO_64;
+    }
+
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
 
     case GET_ASI_DIRECT:
-        gen_address_mask(dc, addr);
+        memop |= MO_ALIGN_4;
         switch (size) {
-        case 4:
+        case MO_32:
             d32 = gen_load_fpr_F(dc, rd);
-            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
             break;
-        case 8:
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_4);
+        case MO_64:
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN_4);
             break;
-        case 16:
+        case MO_128:
             /* Only 4-byte alignment required.  However, it is legal for the
                cpu to signal the alignment fault, and the OS trap handler is
                required to fix it up.  Requiring 16-byte alignment here avoids
                having to probe the second page before performing the first
                write.  */
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_16);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN_16);
             tcg_gen_addi_tl(addr, addr, 8);
-            tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr, da->mem_idx, memop);
             break;
         default:
             g_assert_not_reached();
@@ -2362,24 +2378,19 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_BLOCK:
         /* Valid for stdfa on aligned registers only.  */
-        if (size == 8 && (rd & 7) == 0) {
-            MemOp memop;
+        if (size == MO_64 && (rd & 7) == 0) {
             TCGv eight;
             int i;
 
-            gen_address_mask(dc, addr);
-
             /* The first operation checks required alignment.  */
-            memop = da.memop | MO_ALIGN_64;
             eight = tcg_constant_tl(8);
             for (i = 0; ; ++i) {
-                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
-                                    da.mem_idx, memop);
+                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                 if (i == 7) {
                     break;
                 }
                 tcg_gen_add_tl(addr, addr, eight);
-                memop = da.memop;
             }
         } else {
             gen_exception(dc, TT_ILL_INSN);
@@ -2388,10 +2399,9 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_SHORT:
         /* Valid for stdfa only.  */
-        if (size == 8) {
-            gen_address_mask(dc, addr);
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN);
+        if (size == MO_64) {
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -2406,6 +2416,16 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
     }
 }
 
+static void __attribute__((unused))
+gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+{
+    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL :
+                                     size == 8 ? MO_TEUQ : MO_TE | MO_128));
+
+    gen_address_mask(dc, addr);
+    gen_stf_asi0(dc, &da, addr, rd);
+}
+
 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
 {
     TCGv hi = gen_dest_gpr(dc, rd);
-- 
2.34.1




Thread overview: 95+ messages
2023-10-13 21:27 [PATCH 00/85] target/sparc: Convert to decodetree Richard Henderson
2023-10-13 21:27 ` [PATCH 01/85] target/sparc: Set TCG_GUEST_DEFAULT_MO Richard Henderson
2023-10-16  6:46   ` Philippe Mathieu-Daudé
2023-10-13 21:27 ` [PATCH 02/85] configs: Enable MTTCG for sparc, sparc64 Richard Henderson
2023-10-13 21:27 ` [PATCH 03/85] target/sparc: Remove always-set cpu features Richard Henderson
2023-10-15 16:53   ` Richard Henderson
2023-10-13 21:27 ` [PATCH 04/85] target/sparc: Add decodetree infrastructure Richard Henderson
2023-10-13 21:27 ` [PATCH 05/85] target/sparc: Define AM_CHECK for sparc32 Richard Henderson
2023-10-13 21:27 ` [PATCH 06/85] target/sparc: Move CALL to decodetree Richard Henderson
2023-10-13 21:27 ` [PATCH 07/85] target/sparc: Move BPcc and Bicc " Richard Henderson
2023-10-13 21:27 ` [PATCH 08/85] target/sparc: Move BPr " Richard Henderson
2023-10-13 21:27 ` [PATCH 09/85] target/sparc: Move FBPfcc and FBfcc " Richard Henderson
2025-01-26  9:38   ` Artyom Tarasenko
2025-01-26 12:25     ` Richard Henderson
2023-10-13 21:27 ` [PATCH 10/85] target/sparc: Merge gen_cond with only caller Richard Henderson
2023-10-16  6:52   ` Philippe Mathieu-Daudé
2023-10-13 21:27 ` [PATCH 11/85] target/sparc: Merge gen_fcond " Richard Henderson
2023-10-16  6:52   ` Philippe Mathieu-Daudé
2023-10-13 21:27 ` [PATCH 12/85] target/sparc: Merge gen_branch_[an] " Richard Henderson
2023-10-13 21:27 ` [PATCH 13/85] target/sparc: Pass DisasCompare to advance_jump_cond Richard Henderson
2023-10-13 21:27 ` [PATCH 14/85] target/sparc: Move SETHI to decodetree Richard Henderson
2023-10-13 21:27 ` [PATCH 15/85] target/sparc: Move Tcc " Richard Henderson
2023-10-13 21:27 ` [PATCH 16/85] target/sparc: Move RDASR, STBAR, MEMBAR " Richard Henderson
2023-10-13 21:27 ` [PATCH 17/85] target/sparc: Move RDPSR, RDHPR " Richard Henderson
2023-10-13 21:27 ` [PATCH 18/85] target/sparc: Move RDWIM, RDPR " Richard Henderson
2023-10-13 21:27 ` [PATCH 19/85] target/sparc: Move RDTBR, FLUSHW " Richard Henderson
2023-10-13 21:27 ` [PATCH 20/85] target/sparc: Move WRASR " Richard Henderson
2023-10-13 21:27 ` [PATCH 21/85] target/sparc: Move WRPSR, SAVED, RESTORED " Richard Henderson
2023-10-13 21:27 ` [PATCH 22/85] target/sparc: Move WRWIM, WRPR " Richard Henderson
2023-10-13 21:27 ` [PATCH 23/85] target/sparc: Move WRTBR, WRHPR " Richard Henderson
2023-10-13 21:27 ` [PATCH 24/85] target/sparc: Move basic arithmetic " Richard Henderson
2023-10-13 21:27 ` [PATCH 25/85] target/sparc: Move ADDC " Richard Henderson
2023-10-13 21:27 ` [PATCH 26/85] target/sparc: Move MULX " Richard Henderson
2023-10-13 21:27 ` [PATCH 27/85] target/sparc: Move UMUL, SMUL " Richard Henderson
2023-10-13 21:27 ` [PATCH 28/85] target/sparc: Move SUBC " Richard Henderson
2023-10-13 21:27 ` [PATCH 29/85] target/sparc: Move UDIVX, SDIVX " Richard Henderson
2023-10-13 21:27 ` [PATCH 30/85] target/sparc: Move UDIV, SDIV " Richard Henderson
2023-10-13 21:27 ` [PATCH 31/85] target/sparc: Move TADD, TSUB, MULS " Richard Henderson
2023-10-13 21:27 ` [PATCH 32/85] target/sparc: Move SLL, SRL, SRA " Richard Henderson
2023-10-13 21:27 ` [PATCH 33/85] target/sparc: Move MOVcc, MOVR " Richard Henderson
2023-10-13 21:27 ` [PATCH 34/85] target/sparc: Move POPC " Richard Henderson
2023-10-13 21:27 ` [PATCH 35/85] target/sparc: Convert remaining v8 coproc insns " Richard Henderson
2023-10-13 21:27 ` [PATCH 36/85] target/sparc: Move JMPL, RETT, RETURN " Richard Henderson
2023-10-13 21:27 ` [PATCH 37/85] target/sparc: Move FLUSH, SAVE, RESTORE " Richard Henderson
2023-10-13 21:27 ` [PATCH 38/85] target/sparc: Move DONE, RETRY " Richard Henderson
2023-10-13 21:28 ` [PATCH 39/85] target/sparc: Split out resolve_asi Richard Henderson
2023-10-13 21:28 ` [PATCH 40/85] target/sparc: Drop ifdef around get_asi and friends Richard Henderson
2023-10-13 21:28 ` [PATCH 41/85] target/sparc: Split out ldst functions with asi pre-computed Richard Henderson
2023-10-13 21:28 ` [PATCH 42/85] target/sparc: Use tcg_gen_qemu_{ld, st}_i128 for GET_ASI_DTWINX Richard Henderson
2023-10-13 21:28 ` [PATCH 43/85] target/sparc: Move simple integer load/store to decodetree Richard Henderson
2023-10-13 21:28 ` [PATCH 44/85] target/sparc: Move asi " Richard Henderson
2023-10-13 21:28 ` [PATCH 45/85] target/sparc: Move LDSTUB, LDSTUBA " Richard Henderson
2023-10-13 21:28 ` [PATCH 46/85] target/sparc: Move SWAP, SWAPA " Richard Henderson
2023-10-13 21:28 ` [PATCH 47/85] target/sparc: Move CASA, CASXA " Richard Henderson
2023-10-13 21:28 ` [PATCH 48/85] target/sparc: Move PREFETCH, PREFETCHA " Richard Henderson
2023-10-13 21:28 ` [PATCH 49/85] target/sparc: Split out fp ldst functions with asi precomputed Richard Henderson [this message]
2023-10-13 21:28 ` [PATCH 50/85] target/sparc: Move simple fp load/store " Richard Henderson
2023-10-13 21:28 ` [PATCH 51/85] target/sparc: Move asi " Richard Henderson
2023-10-13 21:28 ` [PATCH 52/85] target/sparc: Move LDFSR, STFSR " Richard Henderson
2023-10-13 21:28 ` [PATCH 53/85] target/sparc: Merge LDFSR, LDXFSR implementations Richard Henderson
2023-10-13 21:28 ` [PATCH 54/85] target/sparc: Move EDGE* to decodetree Richard Henderson
2023-10-13 21:28 ` [PATCH 55/85] target/sparc: Move ARRAY* " Richard Henderson
2023-10-13 21:28 ` [PATCH 56/85] target/sparc: Move ADDRALIGN* " Richard Henderson
2023-10-13 21:28 ` [PATCH 57/85] target/sparc: Move BMASK " Richard Henderson
2023-10-13 21:28 ` [PATCH 58/85] target/sparc: Move FMOVS, FNEGS, FABSS, FSRC*S, FNOT*S " Richard Henderson
2023-10-13 21:28 ` [PATCH 59/85] target/sparc: Move FMOVD, FNEGD, FABSD, FSRC*D, FNOT*D " Richard Henderson
2023-10-13 21:28 ` [PATCH 60/85] target/sparc: Use tcg_gen_vec_{add,sub}* Richard Henderson
2023-10-13 21:28 ` [PATCH 61/85] target/sparc: Move gen_ne_fop_FFF insns to decodetree Richard Henderson
2023-10-13 21:28 ` [PATCH 62/85] target/sparc: Move gen_ne_fop_DDD " Richard Henderson
2023-10-13 21:28 ` [PATCH 63/85] target/sparc: Move PDIST " Richard Henderson
2023-10-13 21:28 ` [PATCH 64/85] target/sparc: Move gen_gsr_fop_DDD insns " Richard Henderson
2023-10-13 21:28 ` [PATCH 65/85] target/sparc: Move gen_fop_FF " Richard Henderson
2023-10-13 21:28 ` [PATCH 66/85] target/sparc: Move gen_fop_DD " Richard Henderson
2023-10-13 21:28 ` [PATCH 67/85] target/sparc: Move FSQRTq " Richard Henderson
2023-10-13 21:28 ` [PATCH 68/85] target/sparc: Move gen_fop_FFF insns " Richard Henderson
2023-10-13 21:28 ` [PATCH 69/85] target/sparc: Move gen_fop_DDD " Richard Henderson
2023-10-13 21:28 ` [PATCH 70/85] target/sparc: Move gen_fop_QQQ " Richard Henderson
2023-10-13 21:28 ` [PATCH 71/85] target/sparc: Move FSMULD " Richard Henderson
2023-10-13 21:28 ` [PATCH 72/85] target/sparc: Move FDMULQ " Richard Henderson
2023-10-13 21:28 ` [PATCH 73/85] target/sparc: Move gen_fop_FD insns " Richard Henderson
2023-10-13 21:28 ` [PATCH 74/85] target/sparc: Move FiTOd, FsTOd, FsTOx " Richard Henderson
2023-10-13 21:28 ` [PATCH 75/85] target/sparc: Move FqTOs, FqTOi " Richard Henderson
2023-10-13 21:28 ` [PATCH 76/85] target/sparc: Move FqTOd, FqTOx " Richard Henderson
2023-10-13 21:28 ` [PATCH 77/85] target/sparc: Move FiTOq, FsTOq " Richard Henderson
2023-10-13 21:28 ` [PATCH 78/85] target/sparc: Move FdTOq, FxTOq " Richard Henderson
2023-10-13 21:28 ` [PATCH 79/85] target/sparc: Move FMOVq, FNEGq, FABSq " Richard Henderson
2023-10-13 21:28 ` [PATCH 80/85] target/sparc: Move FMOVR, FMOVcc, FMOVfcc " Richard Henderson
2023-10-13 21:28 ` [PATCH 81/85] target/sparc: Convert FCMP, FCMPE " Richard Henderson
2023-10-13 21:28 ` [PATCH 82/85] target/sparc: Move FPCMP* " Richard Henderson
2023-10-13 21:28 ` [PATCH 83/85] target/sparc: Move FPACK16, FPACKFIX " Richard Henderson
2023-10-13 21:28 ` [PATCH 84/85] target/sparc: Convert FZERO, FONE " Richard Henderson
2023-10-13 21:28 ` [PATCH 85/85] target/sparc: Remove disas_sparc_legacy Richard Henderson
2023-10-14  6:32 ` [PATCH 00/85] target/sparc: Convert to decodetree Mark Cave-Ayland
2023-10-15 20:12 ` Mark Cave-Ayland
2023-10-15 22:38   ` Richard Henderson
