From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH 23/82] include/hw/core/cpu: Introduce MMUIdxMap
Date: Sat, 26 Jul 2025 22:01:55 -1000
Message-ID: <20250727080254.83840-24-richard.henderson@linaro.org>
In-Reply-To: <20250727080254.83840-1-richard.henderson@linaro.org>

Use a typedef instead of uint16_t directly when
describing sets of mmu indexes.
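
For illustration only (not part of this change), a caller builds such a
set as a bitmask over mmu index numbers, each below NB_MMU_MODES, and
passes it to the flush helpers.  The index names below are placeholders:

    /*
     * Hypothetical sketch: flush two target-specific mmu indexes on @cpu.
     * MMU_IDX_FOO and MMU_IDX_BAR stand in for real index numbers
     * (each < NB_MMU_MODES).
     */
    static void flush_foo_and_bar(CPUState *cpu)
    {
        MMUIdxMap idxmap = (1u << MMU_IDX_FOO) | (1u << MMU_IDX_BAR);
        tlb_flush_by_mmuidx(cpu, idxmap);
    }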

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cputlb.h | 32 ++++++++++++++++----------------
 include/hw/core/cpu.h |  6 +++---
 accel/tcg/cputlb.c    | 30 +++++++++++++++---------------
 3 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index 03ed7e2165..9bec0e7890 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -150,7 +150,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
  * MMU indexes.
  */
 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
-                              uint16_t idxmap);
+                              MMUIdxMap idxmap);
 
 /**
  * tlb_flush_page_by_mmuidx_all_cpus_synced:
@@ -165,7 +165,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
  * translations using the flushed TLBs.
  */
 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap);
+                                              MMUIdxMap idxmap);
 
 /**
  * tlb_flush_by_mmuidx:
@@ -176,7 +176,7 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
  * Flush all entries from the TLB of the specified CPU, for the specified
  * MMU indexes.
  */
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap);
 
 /**
  * tlb_flush_by_mmuidx_all_cpus_synced:
@@ -189,7 +189,7 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
  * When this function returns, no CPUs will subsequently perform
  * translations using the flushed TLBs.
  */
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, MMUIdxMap idxmap);
 
 /**
  * tlb_flush_page_bits_by_mmuidx
@@ -201,11 +201,11 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
  * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
  */
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits);
+                                   MMUIdxMap idxmap, unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                                   uint16_t idxmap,
+                                                   MMUIdxMap idxmap,
                                                    unsigned bits);
 
 /**
@@ -220,14 +220,14 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
  * comparing only the low @bits worth of each virtual page.
  */
 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
+                               vaddr len, MMUIdxMap idxmap,
                                unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                vaddr addr,
                                                vaddr len,
-                                               uint16_t idxmap,
+                                               MMUIdxMap idxmap,
                                                unsigned bits);
 #else
 static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
@@ -243,42 +243,42 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
 {
 }
 static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            vaddr addr, uint16_t idxmap)
+                                            vaddr addr, MMUIdxMap idxmap)
 {
 }
 
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
-                                                            uint16_t idxmap)
+                                                            MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                       uint16_t idxmap)
+                                                       MMUIdxMap idxmap)
 {
 }
 static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                  vaddr addr,
-                                                 uint16_t idxmap,
+                                                 MMUIdxMap idxmap,
                                                  unsigned bits)
 {
 }
 static inline void
 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap, unsigned bits)
+                                              MMUIdxMap idxmap, unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                                             vaddr len, uint16_t idxmap,
+                                             vaddr len, MMUIdxMap idxmap,
                                              unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                              vaddr addr,
                                                              vaddr len,
-                                                             uint16_t idxmap,
+                                                             MMUIdxMap idxmap,
                                                              unsigned bits)
 {
 }
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 5eaf41a566..1153cadb70 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -198,10 +198,10 @@ struct CPUClass {
 };
 
 /*
- * Fix the number of mmu modes to 16, which is also the maximum
- * supported by the softmmu tlb api.
+ * Fix the number of mmu modes to 16.
  */
 #define NB_MMU_MODES 16
+typedef uint16_t MMUIdxMap;
 
 /* Use a fully associative victim tlb of 8 entries. */
 #define CPU_VTLB_SIZE 8
@@ -306,7 +306,7 @@ typedef struct CPUTLBCommon {
      * mmu_idx N since the last time that mmu_idx was flushed.
      * Protected by tlb_c.lock.
      */
-    uint16_t dirty;
+    MMUIdxMap dirty;
     /*
      * Statistics.  These are not lock protected, but are read and
      * written atomically.  This allows the monitor to print a snapshot
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 87e14bde4f..d324f33339 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -370,8 +370,8 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
 
 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 {
-    uint16_t asked = data.host_int;
-    uint16_t all_dirty, work, to_clean;
+    MMUIdxMap asked = data.host_int;
+    MMUIdxMap all_dirty, work, to_clean;
     int64_t now = get_clock_realtime();
 
     assert_cpu_is_self(cpu);
@@ -408,7 +408,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
     }
 }
 
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap)
 {
     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
 
@@ -422,7 +422,7 @@ void tlb_flush(CPUState *cpu)
     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
 }
 
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, MMUIdxMap idxmap)
 {
     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
 
@@ -531,7 +531,7 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
  */
 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                              vaddr addr,
-                                             uint16_t idxmap)
+                                             MMUIdxMap idxmap)
 {
     int mmu_idx;
 
@@ -570,14 +570,14 @@ static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
 {
     vaddr addr_and_idxmap = data.target_ptr;
     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
-    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+    MMUIdxMap idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
 
     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
 }
 
 typedef struct {
     vaddr addr;
-    uint16_t idxmap;
+    MMUIdxMap idxmap;
 } TLBFlushPageByMMUIdxData;
 
 /**
@@ -599,7 +599,7 @@ static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
     g_free(d);
 }
 
-void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, MMUIdxMap idxmap)
 {
     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
 
@@ -618,7 +618,7 @@ void tlb_flush_page(CPUState *cpu, vaddr addr)
 
 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
-                                              uint16_t idxmap)
+                                              MMUIdxMap idxmap)
 {
     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
 
@@ -715,8 +715,8 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
 typedef struct {
     vaddr addr;
     vaddr len;
-    uint16_t idxmap;
-    uint16_t bits;
+    MMUIdxMap idxmap;
+    unsigned bits;
 } TLBFlushRangeData;
 
 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
@@ -766,7 +766,7 @@ static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
 }
 
 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
+                               vaddr len, MMUIdxMap idxmap,
                                unsigned bits)
 {
     TLBFlushRangeData d;
@@ -797,7 +797,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
 }
 
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits)
+                                   MMUIdxMap idxmap, unsigned bits)
 {
     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
 }
@@ -805,7 +805,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                vaddr addr,
                                                vaddr len,
-                                               uint16_t idxmap,
+                                               MMUIdxMap idxmap,
                                                unsigned bits)
 {
     TLBFlushRangeData d, *p;
@@ -847,7 +847,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                    vaddr addr,
-                                                   uint16_t idxmap,
+                                                   MMUIdxMap idxmap,
                                                    unsigned bits)
 {
     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
-- 
2.43.0




Thread overview: 188+ messages
2025-07-27  8:01 [PATCH for-10.2 00/82] target/arm: Implement FEAT_GCS Richard Henderson
2025-07-27  8:01 ` [PATCH 01/82] target/arm: Add prot_check parameter to pmsav8_mpu_lookup Richard Henderson
2025-07-30 20:27   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 02/82] target/arm: Add in_prot_check to S1Translate Richard Henderson
2025-07-30 20:27   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 03/82] target/arm: Skip permission check from arm_cpu_get_phys_page_attrs_debug Richard Henderson
2025-07-30 20:28   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 04/82] target/arm: Introduce get_phys_addr_for_at Richard Henderson
2025-07-30 20:29   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 05/82] target/arm: Skip AF and DB updates for AccessType_AT Richard Henderson
2025-07-30 20:31   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 06/82] target/arm: Add prot_check parameter to do_ats_write Richard Henderson
2025-07-30 20:35   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 07/82] target/arm: Fill in HFG[RWI]TR_EL2 bits for Arm v9.5 Richard Henderson
2025-07-30 20:35   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 08/82] target/arm: Remove outdated comment for ZCR_EL12 Richard Henderson
2025-07-30 20:36   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 09/82] target/arm: Implement FEAT_ATS1A Richard Henderson
2025-07-30 20:38   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 10/82] target/arm: Add isar feature test for FEAT_S1PIE, FEAT_S2PIE Richard Henderson
2025-07-30 20:38   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 11/82] target/arm: Enable TCR2_ELx.PIE Richard Henderson
2025-07-30 20:39   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 12/82] target/arm: Implement PIR_ELx, PIRE0_ELx, S2PIR_EL2 registers Richard Henderson
2025-07-30 20:41   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 13/82] target/arm: Force HPD for stage2 translations Richard Henderson
2025-07-30 20:42   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 14/82] target/arm: Cache NV1 early in get_phys_addr_lpae Richard Henderson
2025-07-30 20:43   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 15/82] target/arm: Populate PIE in aa64_va_parameters Richard Henderson
2025-07-30 20:45   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 16/82] target/arm: Implement get_S1prot_indirect Richard Henderson
2025-07-31 21:15   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 17/82] target/arm: Implement get_S2prot_indirect Richard Henderson
2025-07-31 21:13   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 18/82] target/arm: Do not migrate env->exception Richard Henderson
2025-07-30 20:46   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 19/82] target/arm: Expand CPUARMState.exception.syndrome to 64 bits Richard Henderson
2025-07-30 20:47   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 20/82] target/arm: Expand syndrome parameter to raise_exception* Richard Henderson
2025-07-30 20:47   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 21/82] target/arm: Implement dirtybit check for PIE Richard Henderson
2025-07-30 20:50   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 22/82] target/arm: Enable FEAT_S1PIE and FEAT_S2PIE on -cpu max Richard Henderson
2025-07-30 20:50   ` Pierrick Bouvier
2025-07-27  8:01 ` Richard Henderson [this message]
2025-07-30 20:52   ` [PATCH 23/82] include/hw/core/cpu: Introduce MMUIdxMap Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 24/82] include/hw/core/cpu: Introduce cpu_tlb_fast Richard Henderson
2025-07-30 20:53   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 25/82] include/hw/core/cpu: Invert the indexing into CPUTLBDescFast Richard Henderson
2025-07-30 21:00   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 26/82] target/hppa: Adjust mmu indexes to begin with 0 Richard Henderson
2025-07-30 21:03   ` Pierrick Bouvier
2025-07-27  8:01 ` [PATCH 27/82] include/exec/memopidx: Adjust for 32 mmu indexes Richard Henderson
2025-07-30 21:03   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 28/82] include/hw/core/cpu: Widen MMUIdxMap Richard Henderson
2025-07-30 21:07   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 29/82] target/arm: Split out mmuidx.h from cpu.h Richard Henderson
2025-07-30 21:07   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 30/82] target/arm: Convert arm_mmu_idx_to_el from switch to table Richard Henderson
2025-07-30 21:10   ` Pierrick Bouvier
2025-07-30 21:12     ` Pierrick Bouvier
2025-07-30 21:31       ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 31/82] target/arm: Remove unused env argument from regime_el Richard Henderson
2025-07-30 21:13   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 32/82] target/arm: Convert regime_el from switch to table Richard Henderson
2025-07-30 21:15   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 33/82] target/arm: Convert regime_has_2_ranges " Richard Henderson
2025-07-30 21:16   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 34/82] target/arm: Remove unused env argument from regime_is_pan Richard Henderson
2025-07-30 21:17   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 35/82] target/arm: Convert regime_is_pan from switch to table Richard Henderson
2025-07-30 21:17   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 36/82] target/arm: Remove unused env argument from regime_is_user Richard Henderson
2025-07-30 21:18   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 37/82] target/arm: Convert regime_is_user from switch to table Richard Henderson
2025-07-30 21:21   ` Pierrick Bouvier
2025-08-01  3:53     ` Richard Henderson
2025-08-01 16:10       ` Pierrick Bouvier
2025-08-01 18:06       ` Peter Maydell
2025-07-27  8:02 ` [PATCH 38/82] target/arm: Convert arm_mmu_idx_is_stage1_of_2 " Richard Henderson
2025-07-30 21:23   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 39/82] target/arm: Convert regime_is_stage2 " Richard Henderson
2025-07-30 21:23   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 40/82] target/arm: Introduce mmu indexes for GCS Richard Henderson
2025-07-30 21:28   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 41/82] target/arm: Introduce regime_to_gcs Richard Henderson
2025-07-30 21:33   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 42/82] target/arm: Support page protections for GCS mmu indexes Richard Henderson
2025-07-31 18:40   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 43/82] target/arm: Implement gcs bit for data abort Richard Henderson
2025-07-31 18:41   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 44/82] target/arm: Add GCS cpregs Richard Henderson
2025-07-31 18:45   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 45/82] target/arm: Add GCS enable and trap levels to DisasContext Richard Henderson
2025-07-31 18:54   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 46/82] target/arm: Implement FEAT_CHK Richard Henderson
2025-07-29 16:01   ` Gustavo Romero
2025-07-29 17:21     ` Richard Henderson
2025-07-31 18:59   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 47/82] target/arm: Expand pstate to 64 bits Richard Henderson
2025-07-31 19:13   ` Pierrick Bouvier
2025-08-01  4:24     ` Richard Henderson
2025-08-01 12:35       ` Manos Pitsidianakis
2025-08-01  4:29     ` Richard Henderson
2025-08-01 16:12       ` Pierrick Bouvier
2025-08-01 20:19         ` Richard Henderson
2025-08-01 13:22     ` Peter Maydell
2025-08-01 16:26       ` Pierrick Bouvier
2025-08-01 16:37         ` Peter Maydell
2025-08-01 16:41           ` Pierrick Bouvier
2025-08-01 16:44             ` Peter Maydell
2025-08-01 16:53               ` Pierrick Bouvier
2025-08-01 16:55                 ` Pierrick Bouvier
2025-08-01 18:45       ` Manos Pitsidianakis
2025-08-02  3:14         ` Thiago Jung Bauermann
2025-08-01 20:22   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 48/82] target/arm: Add syndrome data for EC_GCS Richard Henderson
2025-07-31 19:35   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 49/82] target/arm: Implement EXLOCKException for ELR_ELx and SPSR_ELx Richard Henderson
2025-07-31 20:53   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 50/82] target/arm: Split {arm,core}_user_mem_index Richard Henderson
2025-07-31 21:06   ` Pierrick Bouvier
2025-07-31 21:24     ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 51/82] target/arm: Introduce delay_exception{_el} Richard Henderson
2025-07-31 21:09   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 52/82] target/arm: Emit HSTR trap exception out of line Richard Henderson
2025-07-31 21:10   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 53/82] target/arm: Emit v7m LTPSIZE " Richard Henderson
2025-07-31 21:10   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 54/82] target/arm: Implement GCSSTR, GCSSTTR Richard Henderson
2025-07-31 21:25   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 55/82] target/arm: Implement GCSB Richard Henderson
2025-07-31 21:27   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 56/82] target/arm: Implement GCSPUSHM Richard Henderson
2025-07-31 21:33   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 57/82] target/arm: Implement GCSPOPM Richard Henderson
2025-07-31 21:34   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 58/82] target/arm: Implement GCSPUSHX Richard Henderson
2025-07-31 21:36   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 59/82] target/arm: Implement GCSPOPX Richard Henderson
2025-07-31 21:37   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 60/82] target/arm: Implement GCSPOPCX Richard Henderson
2025-07-31 21:38   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 61/82] target/arm: Implement GCSSS1 Richard Henderson
2025-07-31 21:43   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 62/82] target/arm: Implement GCSSS2 Richard Henderson
2025-07-31 21:45   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 63/82] target/arm: Add gcs record for BL Richard Henderson
2025-07-31 21:45   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 64/82] target/arm: Add gcs record for BLR Richard Henderson
2025-07-31 21:46   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 65/82] target/arm: Add gcs record for BLR with PAuth Richard Henderson
2025-07-31 21:46   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 66/82] target/arm: Load gcs record for RET Richard Henderson
2025-07-31 21:47   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 67/82] target/arm: Load gcs record for RET with PAuth Richard Henderson
2025-07-31 21:48   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 68/82] target/arm: Copy EXLOCKEn to EXLOCK on exception to the same EL Richard Henderson
2025-07-31 21:48   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 69/82] target/arm: Implement EXLOCK check during exception return Richard Henderson
2025-07-31 21:49   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 70/82] target/arm: Enable FEAT_GCS with -cpu max Richard Henderson
2025-07-31 21:49   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 71/82] linux-user/aarch64: Implement prctls for GCS Richard Henderson
2025-07-31 21:53   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 72/82] linux-user/aarch64: Allocate new gcs stack on clone Richard Henderson
2025-07-31 21:54   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 73/82] linux-user/aarch64: Release gcs stack on thread exit Richard Henderson
2025-07-31 21:54   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 74/82] linux-user/aarch64: Implement map_shadow_stack syscall Richard Henderson
2025-07-31 21:55   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 75/82] target/arm: Enable GCSPR_EL0 for read in user-mode Richard Henderson
2025-07-31 21:56   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 76/82] linux-user/aarch64: Inject SIGSEGV for GCS faults Richard Henderson
2025-07-31 21:56   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 77/82] linux-user/aarch64: Generate GCS signal records Richard Henderson
2025-07-31 21:57   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 78/82] linux-user: Change exported get_elf_hwcap to abi_ulong Richard Henderson
2025-07-31 21:58   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 79/82] linux-user/aarch64: Enable GCS in HWCAP Richard Henderson
2025-07-31 21:58   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 80/82] tests/tcg/aarch64: Add gcsstr Richard Henderson
2025-07-31 22:05   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 81/82] tests/tcg/aarch64: Add gcspushm Richard Henderson
2025-07-31 22:11   ` Pierrick Bouvier
2025-07-27  8:02 ` [PATCH 82/82] tests/tcg/aarch64: Add gcsss Richard Henderson
2025-07-31 22:14   ` Pierrick Bouvier
