From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org, "Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Pierrick Bouvier" <pierrick.bouvier@linaro.org>
Subject: [PATCH v4 26/84] include/hw/core/cpu: Introduce MMUIdxMap
Date: Sat, 30 Aug 2025 15:40:30 +1000
Message-ID: <20250830054128.448363-27-richard.henderson@linaro.org>
In-Reply-To: <20250830054128.448363-1-richard.henderson@linaro.org>

Use a typedef, MMUIdxMap, instead of uint16_t directly when
describing sets of MMU indexes.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
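Usage sketch, for review convenience only (not part of the patch):
an MMUIdxMap is a bitmask with one bit per MMU index, so callers
compose it with shifts.  The index names below are made up for
illustration; each target defines its own.

    /* Hypothetical MMU indexes; real names and values vary per target. */
    enum { MMU_KERNEL_IDX = 0, MMU_USER_IDX = 1 };

    /* Flush TLB entries for both indexes on @cpu. */
    MMUIdxMap idxmap = (1u << MMU_KERNEL_IDX) | (1u << MMU_USER_IDX);
    tlb_flush_by_mmuidx(cpu, idxmap);

As the diff below shows, tlb_flush() remains the special case of
flushing every index at once, i.e. tlb_flush_by_mmuidx(cpu,
ALL_MMUIDX_BITS).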
 include/exec/cputlb.h | 32 ++++++++++++++++----------------
 include/hw/core/cpu.h |  6 +++---
 accel/tcg/cputlb.c    | 30 +++++++++++++++---------------
 3 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index 03ed7e2165..9bec0e7890 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -150,7 +150,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
  * MMU indexes.
  */
 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
-                              uint16_t idxmap);
+                              MMUIdxMap idxmap);
 
 /**
  * tlb_flush_page_by_mmuidx_all_cpus_synced:
@@ -165,7 +165,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
  * translations using the flushed TLBs.
  */
 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap);
+                                              MMUIdxMap idxmap);
 
 /**
  * tlb_flush_by_mmuidx:
@@ -176,7 +176,7 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
  * Flush all entries from the TLB of the specified CPU, for the specified
  * MMU indexes.
  */
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap);
 
 /**
  * tlb_flush_by_mmuidx_all_cpus_synced:
@@ -189,7 +189,7 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
  * When this function returns, no CPUs will subsequently perform
  * translations using the flushed TLBs.
  */
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, MMUIdxMap idxmap);
 
 /**
  * tlb_flush_page_bits_by_mmuidx
@@ -201,11 +201,11 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
  * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
  */
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits);
+                                   MMUIdxMap idxmap, unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                                   uint16_t idxmap,
+                                                   MMUIdxMap idxmap,
                                                    unsigned bits);
 
 /**
@@ -220,14 +220,14 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
  * comparing only the low @bits worth of each virtual page.
  */
 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
+                               vaddr len, MMUIdxMap idxmap,
                                unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                vaddr addr,
                                                vaddr len,
-                                               uint16_t idxmap,
+                                               MMUIdxMap idxmap,
                                                unsigned bits);
 #else
 static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
@@ -243,42 +243,42 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
 {
 }
 static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            vaddr addr, uint16_t idxmap)
+                                            vaddr addr, MMUIdxMap idxmap)
 {
 }
 
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
-                                                            uint16_t idxmap)
+                                                            MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                       uint16_t idxmap)
+                                                       MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                  vaddr addr,
-                                                 uint16_t idxmap,
+                                                 MMUIdxMap idxmap,
                                                  unsigned bits)
 {
 }
 static inline void
 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap, unsigned bits)
+                                              MMUIdxMap idxmap, unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                                             vaddr len, uint16_t idxmap,
+                                             vaddr len, MMUIdxMap idxmap,
                                              unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                              vaddr addr,
                                                              vaddr len,
-                                                             uint16_t idxmap,
+                                                             MMUIdxMap idxmap,
                                                              unsigned bits)
 {
 }
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 5eaf41a566..1153cadb70 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -198,10 +198,10 @@ struct CPUClass {
 };
 
 /*
- * Fix the number of mmu modes to 16, which is also the maximum
- * supported by the softmmu tlb api.
+ * Fix the number of mmu modes to 16.
  */
 #define NB_MMU_MODES 16
+typedef uint16_t MMUIdxMap;
 
 /* Use a fully associative victim tlb of 8 entries. */
 #define CPU_VTLB_SIZE 8
@@ -306,7 +306,7 @@ typedef struct CPUTLBCommon {
      * mmu_idx N since the last time that mmu_idx was flushed.
      * Protected by tlb_c.lock.
      */
-    uint16_t dirty;
+    MMUIdxMap dirty;
     /*
      * Statistics.  These are not lock protected, but are read and
      * written atomically.  This allows the monitor to print a snapshot
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 87e14bde4f..d324f33339 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -370,8 +370,8 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
 
 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 {
-    uint16_t asked = data.host_int;
-    uint16_t all_dirty, work, to_clean;
+    MMUIdxMap asked = data.host_int;
+    MMUIdxMap all_dirty, work, to_clean;
     int64_t now = get_clock_realtime();
 
     assert_cpu_is_self(cpu);
@@ -408,7 +408,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
     }
 }
 
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap)
 {
     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
 
@@ -422,7 +422,7 @@ void tlb_flush(CPUState *cpu)
     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
 }
 
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, MMUIdxMap idxmap)
 {
     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
 
@@ -531,7 +531,7 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
  */
 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                              vaddr addr,
-                                             uint16_t idxmap)
+                                             MMUIdxMap idxmap)
 {
     int mmu_idx;
 
@@ -570,14 +570,14 @@ static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
 {
     vaddr addr_and_idxmap = data.target_ptr;
     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
-    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+    MMUIdxMap idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
 
     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
 }
 
 typedef struct {
     vaddr addr;
-    uint16_t idxmap;
+    MMUIdxMap idxmap;
 } TLBFlushPageByMMUIdxData;
 
 /**
@@ -599,7 +599,7 @@ static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
     g_free(d);
 }
 
-void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, MMUIdxMap idxmap)
 {
     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
 
@@ -618,7 +618,7 @@ void tlb_flush_page(CPUState *cpu, vaddr addr)
 
 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
-                                              uint16_t idxmap)
+                                              MMUIdxMap idxmap)
 {
     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
 
@@ -715,8 +715,8 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
 typedef struct {
     vaddr addr;
     vaddr len;
-    uint16_t idxmap;
-    uint16_t bits;
+    MMUIdxMap idxmap;
+    unsigned bits;
 } TLBFlushRangeData;
 
 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
@@ -766,7 +766,7 @@ static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
 }
 
 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
+                               vaddr len, MMUIdxMap idxmap,
                                unsigned bits)
 {
     TLBFlushRangeData d;
@@ -797,7 +797,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
 }
 
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits)
+                                   MMUIdxMap idxmap, unsigned bits)
 {
     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
 }
@@ -805,7 +805,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                vaddr addr,
                                                vaddr len,
-                                               uint16_t idxmap,
+                                               MMUIdxMap idxmap,
                                                unsigned bits)
 {
     TLBFlushRangeData d, *p;
@@ -847,7 +847,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                    vaddr addr,
-                                                   uint16_t idxmap,
+                                                   MMUIdxMap idxmap,
                                                    unsigned bits)
 {
     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
-- 
2.43.0
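
A follow-up note, again illustrative rather than part of the patch:
because the map's width now lives in a single typedef, a later patch
in this series can widen MMUIdxMap without touching any of the call
sites converted here.  A build-time guard one might add to keep
NB_MMU_MODES and the typedef in sync, using QEMU's existing
QEMU_BUILD_BUG_ON macro:

    /* Sketch: fail the build if the map cannot hold one bit per mode. */
    QEMU_BUILD_BUG_ON(NB_MMU_MODES > sizeof(MMUIdxMap) * 8);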



