qemu-devel.nongnu.org archive mirror
From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH v2 37/66] accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
Date: Mon, 22 Aug 2022 08:27:12 -0700
Message-ID: <20220822152741.1617527-38-richard.henderson@linaro.org>
In-Reply-To: <20220822152741.1617527-1-richard.henderson@linaro.org>

This structure will shortly contain more than just
data for accessing MMIO.  Rename the 'addr' member
to 'xlat_section' to more clearly indicate its purpose.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h    |  22 ++++----
 accel/tcg/cputlb.c         | 102 +++++++++++++++++++------------------
 target/arm/mte_helper.c    |  14 ++---
 target/arm/sve_helper.c    |   4 +-
 target/arm/translate-a64.c |   2 +-
 5 files changed, 73 insertions(+), 71 deletions(-)
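
As a quick orientation before the hunks below, here is a minimal sketch of how the renamed field ends up being consumed; it is not part of the patch, and the helper name is made up for illustration, but the field access mirrors the io_readx()/io_writex() hunks in accel/tcg/cputlb.c.

/*
 * Illustration only -- not part of this patch.  The helper name is
 * hypothetical; the computation copies the io_readx()/io_writex()
 * hunks below.
 */
static hwaddr full_entry_mr_offset(const CPUTLBEntryFull *full,
                                   target_ulong addr)
{
    /*
     * xlat_section packs two things:
     *  - the low TARGET_PAGE_BITS hold a physical section number;
     *  - the remaining bits hold an offset which, added to the virtual
     *    address, yields the offset within the target MemoryRegion
     *    (or a ram_addr_t for the NOTDIRTY/ROM sections).
     */
    return (full->xlat_section & TARGET_PAGE_MASK) + addr;
}

In other words, the value is a section-number-plus-translation-offset cookie rather than a plain address, which is what the rename from 'addr' to 'xlat_section' is meant to convey.
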

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index ba3cd32a1e..f70f54d850 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -108,6 +108,7 @@ typedef uint64_t target_ulong;
 #  endif
 # endif
 
+/* Minimalized TLB entry for use by TCG fast path. */
 typedef struct CPUTLBEntry {
     /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
        bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
@@ -131,14 +132,14 @@ typedef struct CPUTLBEntry {
 
 QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
 
-/* The IOTLB is not accessed directly inline by generated TCG code,
- * so the CPUIOTLBEntry layout is not as critical as that of the
- * CPUTLBEntry. (This is also why we don't want to combine the two
- * structs into one.)
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry. This is
+ * also why we don't want to combine the two structs.
  */
-typedef struct CPUIOTLBEntry {
+typedef struct CPUTLBEntryFull {
     /*
-     * @addr contains:
+     * @xlat_section contains:
      *  - in the lower TARGET_PAGE_BITS, a physical section number
      *  - with the lower TARGET_PAGE_BITS masked off, an offset which
      *    must be added to the virtual address to obtain:
@@ -146,9 +147,9 @@ typedef struct CPUIOTLBEntry {
      *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
      *     + the offset within the target MemoryRegion (otherwise)
      */
-    hwaddr addr;
+    hwaddr xlat_section;
     MemTxAttrs attrs;
-} CPUIOTLBEntry;
+} CPUTLBEntryFull;
 
 /*
  * Data elements that are per MMU mode, minus the bits accessed by
@@ -172,9 +173,8 @@ typedef struct CPUTLBDesc {
     size_t vindex;
     /* The tlb victim table, in two parts.  */
     CPUTLBEntry vtable[CPU_VTLB_SIZE];
-    CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
-    /* The iotlb.  */
-    CPUIOTLBEntry *iotlb;
+    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
+    CPUTLBEntryFull *fulltlb;
 } CPUTLBDesc;
 
 /*
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a46f3a654d..a37275bf8e 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -200,13 +200,13 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
     }
 
     g_free(fast->table);
-    g_free(desc->iotlb);
+    g_free(desc->fulltlb);
 
     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_try_new(CPUTLBEntry, new_size);
-    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
 
     /*
      * If the allocations fail, try smaller sizes. We just freed some
@@ -215,7 +215,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
      * allocations to fail though, so we progressively reduce the allocation
      * size, aborting if we cannot even allocate the smallest TLB we support.
      */
-    while (fast->table == NULL || desc->iotlb == NULL) {
+    while (fast->table == NULL || desc->fulltlb == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
@@ -224,9 +224,9 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 
         g_free(fast->table);
-        g_free(desc->iotlb);
+        g_free(desc->fulltlb);
         fast->table = g_try_new(CPUTLBEntry, new_size);
-        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
     }
 }
 
@@ -258,7 +258,7 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
     desc->n_used_entries = 0;
     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_new(CPUTLBEntry, n_entries);
-    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
     tlb_mmu_flush_locked(desc, fast);
 }
 
@@ -299,7 +299,7 @@ void tlb_destroy(CPUState *cpu)
         CPUTLBDescFast *fast = &env_tlb(env)->f[i];
 
         g_free(fast->table);
-        g_free(desc->iotlb);
+        g_free(desc->fulltlb);
     }
 }
 
@@ -1219,7 +1219,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 
         /* Evict the old entry into the victim tlb.  */
         copy_tlb_helper_locked(tv, te);
-        desc->viotlb[vidx] = desc->iotlb[index];
+        desc->vfulltlb[vidx] = desc->fulltlb[index];
         tlb_n_used_entries_dec(env, mmu_idx);
     }
 
@@ -1236,8 +1236,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * subtract here is that of the page base, and not the same as the
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
-    desc->iotlb[index].addr = iotlb - vaddr_page;
-    desc->iotlb[index].attrs = attrs;
+    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
+    desc->fulltlb[index].attrs = attrs;
 
     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
@@ -1341,7 +1341,7 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
     }
 }
 
-static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
 {
@@ -1353,9 +1353,9 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
     mr = section->mr;
-    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
@@ -1365,14 +1365,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
+    r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;
 
         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
-                               mmu_idx, iotlbentry->attrs, r, retaddr);
+                               mmu_idx, full->attrs, r, retaddr);
     }
     if (locked) {
         qemu_mutex_unlock_iothread();
@@ -1382,8 +1382,8 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 }
 
 /*
- * Save a potentially trashed IOTLB entry for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
+ * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
+ * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
  * because of the side effect of io_writex changing memory layout.
  */
 static void save_iotlb_data(CPUState *cs, hwaddr addr,
@@ -1397,7 +1397,7 @@ static void save_iotlb_data(CPUState *cs, hwaddr addr,
 #endif
 }
 
-static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                       int mmu_idx, uint64_t val, target_ulong addr,
                       uintptr_t retaddr, MemOp op)
 {
@@ -1408,9 +1408,9 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
     mr = section->mr;
-    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -1420,20 +1420,20 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
      * The memory_region_dispatch may trigger a flush/resize
      * so for plugins we save the iotlb_data just in case.
      */
-    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
+    save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
 
     if (!qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
+    r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;
 
         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
-                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
+                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
                                retaddr);
     }
     if (locked) {
@@ -1480,9 +1480,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
             copy_tlb_helper_locked(vtlb, &tmptlb);
             qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
-            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
-            tmpio = *io; *io = *vio; *vio = tmpio;
+            CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+            CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
+            CPUTLBEntryFull tmpf;
+            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
             return true;
         }
     }
@@ -1550,9 +1551,9 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 }
 
 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
-                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
+                           CPUTLBEntryFull *full, uintptr_t retaddr)
 {
-    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
+    ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
 
     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
 
@@ -1645,9 +1646,9 @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
     /* Handle clean RAM pages.  */
     if (unlikely(flags & TLB_NOTDIRTY)) {
         uintptr_t index = tlb_index(env, mmu_idx, addr);
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
 
-        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+        notdirty_write(env_cpu(env), addr, 1, full, retaddr);
         flags &= ~TLB_NOTDIRTY;
     }
 
@@ -1672,19 +1673,19 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
 
     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
         uintptr_t index = tlb_index(env, mmu_idx, addr);
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
 
         /* Handle watchpoints.  */
         if (flags & TLB_WATCHPOINT) {
             int wp_access = (access_type == MMU_DATA_STORE
                              ? BP_MEM_WRITE : BP_MEM_READ);
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, wp_access, retaddr);
+                                 full->attrs, wp_access, retaddr);
         }
 
         /* Handle clean RAM pages.  */
         if (flags & TLB_NOTDIRTY) {
-            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+            notdirty_write(env_cpu(env), addr, 1, full, retaddr);
         }
     }
 
@@ -1715,7 +1716,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
  * should have just filled the TLB. The one corner case is io_writex
  * which can cause TLB flushes and potential resizing of the TLBs
  * losing the information we need. In those cases we need to recover
- * data from a copy of the iotlbentry. As long as this always occurs
+ * data from a copy of the CPUTLBEntryFull. As long as this always occurs
  * from the same thread (which a mem callback will be) this is safe.
  */
 
@@ -1730,11 +1731,12 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
     if (likely(tlb_hit(tlb_addr, addr))) {
         /* We must have an iotlb entry for MMIO */
         if (tlb_addr & TLB_MMIO) {
-            CPUIOTLBEntry *iotlbentry;
-            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+            CPUTLBEntryFull *full;
+            full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
             data->is_io = true;
-            data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
-            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+            data->v.io.section =
+                iotlb_to_section(cpu, full->xlat_section, full->attrs);
+            data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
         } else {
             data->is_io = false;
             data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
@@ -1842,7 +1844,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
         notdirty_write(env_cpu(env), addr, size,
-                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
+                       &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
     }
 
     return hostaddr;
@@ -1950,7 +1952,7 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
 
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
+        CPUTLBEntryFull *full;
         bool need_swap;
 
         /* For anything that is unaligned, recurse through full_load.  */
@@ -1958,20 +1960,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
             goto do_unaligned_access;
         }
 
-        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
 
         /* Handle watchpoints.  */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
             /* On watchpoint hit, this will longjmp out.  */
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
+                                 full->attrs, BP_MEM_READ, retaddr);
         }
 
         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
 
         /* Handle I/O access.  */
         if (likely(tlb_addr & TLB_MMIO)) {
-            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+            return io_readx(env, full, mmu_idx, addr, retaddr,
                             access_type, op ^ (need_swap * MO_BSWAP));
         }
 
@@ -2286,12 +2288,12 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
      */
     if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
         cpu_check_watchpoint(env_cpu(env), addr, size - size2,
-                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
+                             env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
                              BP_MEM_WRITE, retaddr);
     }
     if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
         cpu_check_watchpoint(env_cpu(env), page2, size2,
-                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
+                             env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
                              BP_MEM_WRITE, retaddr);
     }
 
@@ -2355,7 +2357,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
+        CPUTLBEntryFull *full;
         bool need_swap;
 
         /* For anything that is unaligned, recurse through byte stores.  */
@@ -2363,20 +2365,20 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             goto do_unaligned_access;
         }
 
-        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
 
         /* Handle watchpoints.  */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
             /* On watchpoint hit, this will longjmp out.  */
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
+                                 full->attrs, BP_MEM_WRITE, retaddr);
         }
 
         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
 
         /* Handle I/O access.  */
         if (tlb_addr & TLB_MMIO) {
-            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+            io_writex(env, full, mmu_idx, val, addr, retaddr,
                       op ^ (need_swap * MO_BSWAP));
             return;
         }
@@ -2388,7 +2390,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
         /* Handle clean RAM pages.  */
         if (tlb_addr & TLB_NOTDIRTY) {
-            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+            notdirty_write(env_cpu(env), addr, size, full, retaddr);
         }
 
         haddr = (void *)((uintptr_t)addr + entry->addend);
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index d11a8c70d0..fdd23ab3f8 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -106,7 +106,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     return tags + index;
 #else
     uintptr_t index;
-    CPUIOTLBEntry *iotlbentry;
+    CPUTLBEntryFull *full;
     int in_page, flags;
     ram_addr_t ptr_ra;
     hwaddr ptr_paddr, tag_paddr, xlat;
@@ -129,7 +129,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     assert(!(flags & TLB_INVALID_MASK));
 
     /*
-     * Find the iotlbentry for ptr.  This *must* be present in the TLB
+     * Find the CPUTLBEntryFull for ptr.  This *must* be present in the TLB
      * because we just found the mapping.
      * TODO: Perhaps there should be a cputlb helper that returns a
      * matching tlb entry + iotlb entry.
@@ -144,10 +144,10 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
         g_assert(tlb_hit(comparator, ptr));
     }
 # endif
-    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];
+    full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
 
     /* If the virtual page MemAttr != Tagged, access unchecked. */
-    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
+    if (!arm_tlb_mte_tagged(&full->attrs)) {
         return NULL;
     }
 
@@ -181,7 +181,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
         int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
         assert(ra != 0);
         cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
-                             iotlbentry->attrs, wp, ra);
+                             full->attrs, wp, ra);
     }
 
     /*
@@ -202,11 +202,11 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
 
     /* Look up the address in tag space. */
-    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+    tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
     tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
     mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                  tag_access == MMU_DATA_STORE,
-                                 iotlbentry->attrs);
+                                 full->attrs);
 
     /*
      * Note that @mr will never be NULL.  If there is nothing in the address
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index d6f7ef94fe..9cae8fd352 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -5384,8 +5384,8 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
         g_assert(tlb_hit(comparator, addr));
 # endif
 
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
-        info->attrs = iotlbentry->attrs;
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+        info->attrs = full->attrs;
     }
 #endif
 
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 1b593ada36..305044a141 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14626,7 +14626,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
      * table entry even for that case.
      */
     return (tlb_hit(entry->addr_code, addr) &&
-            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
+            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
 #endif
 }
 
-- 
2.34.1



Thread overview: 106+ messages
2022-08-22 15:26 [PATCH v2 00/66] target/arm: Implement FEAT_HAFDBS Richard Henderson
2022-08-22 15:26 ` [PATCH v2 01/66] target/arm: Create GetPhysAddrResult Richard Henderson
2022-09-20 13:50   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 02/66] target/arm: Fix ipa_secure in get_phys_addr Richard Henderson
2022-09-20 14:21   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 03/66] target/arm: Use GetPhysAddrResult in get_phys_addr_lpae Richard Henderson
2022-09-20 14:24   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 04/66] target/arm: Use GetPhysAddrResult in get_phys_addr_v6 Richard Henderson
2022-09-20 14:25   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 05/66] target/arm: Use GetPhysAddrResult in get_phys_addr_v5 Richard Henderson
2022-09-20 14:25   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 06/66] target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav5 Richard Henderson
2022-09-20 14:26   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 07/66] target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav7 Richard Henderson
2022-09-20 14:26   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 08/66] target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav8 Richard Henderson
2022-09-20 14:28   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 09/66] target/arm: Use GetPhysAddrResult in pmsav8_mpu_lookup Richard Henderson
2022-09-20 14:30   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 10/66] target/arm: Remove is_subpage argument to pmsav8_mpu_lookup Richard Henderson
2022-09-20 14:35   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 11/66] target/arm: Add is_secure parameter to v8m_security_lookup Richard Henderson
2022-09-20 14:42   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 12/66] target/arm: Add secure parameter to pmsav8_mpu_lookup Richard Henderson
2022-09-20 14:43   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 13/66] target/arm: Add is_secure parameter to get_phys_addr_v5 Richard Henderson
2022-09-20 14:44   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 14/66] target/arm: Add is_secure parameter to get_phys_addr_v6 Richard Henderson
2022-09-20 14:45   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 15/66] target/arm: Add secure parameter to get_phys_addr_pmsav8 Richard Henderson
2022-09-20 14:46   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 16/66] target/arm: Add is_secure parameter to pmsav7_use_background_region Richard Henderson
2022-09-20 14:46   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 17/66] target/arm: Add is_secure parameter to get_phys_addr_lpae Richard Henderson
2022-09-20 14:52   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 18/66] target/arm: Add secure parameter to get_phys_addr_pmsav7 Richard Henderson
2022-09-20 15:15   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 19/66] target/arm: Add is_secure parameter to regime_translation_disabled Richard Henderson
2022-09-20 15:17   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 20/66] target/arm: Add is_secure parameter to get_phys_addr_pmsav5 Richard Henderson
2022-09-20 15:18   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 21/66] target/arm: Split out get_phys_addr_with_secure Richard Henderson
2022-09-20 15:19   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 22/66] target/arm: Add is_secure parameter to v7m_read_half_insn Richard Henderson
2022-09-20 15:22   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 23/66] target/arm: Add TBFLAG_M32.SECURE Richard Henderson
2022-09-20 15:24   ` Peter Maydell
2022-08-22 15:26 ` [PATCH v2 24/66] target/arm: Merge regime_is_secure into get_phys_addr Richard Henderson
2022-09-20 15:25   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 25/66] target/arm: Add is_secure parameter to do_ats_write Richard Henderson
2022-09-20 15:33   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 26/66] target/arm: Fold secure and non-secure a-profile mmu indexes Richard Henderson
2022-09-20 15:44   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 27/66] target/arm: Reorg regime_translation_disabled Richard Henderson
2022-09-20 15:46   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 28/66] target/arm: Drop secure check for HCR.TGE vs SCTLR_EL1.M Richard Henderson
2022-09-20 15:49   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 29/66] target/arm: Introduce arm_hcr_el2_eff_secstate Richard Henderson
2022-09-20 15:52   ` Peter Maydell
2022-09-28 19:38     ` Richard Henderson
2022-08-22 15:27 ` [PATCH v2 30/66] target/arm: Hoist read of *is_secure in S1_ptw_translate Richard Henderson
2022-09-20 15:53   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 31/66] target/arm: Fix S2 disabled check in S1_ptw_translate Richard Henderson
2022-09-20 16:01   ` Peter Maydell
2022-09-28 23:31     ` Richard Henderson
2022-08-22 15:27 ` [PATCH v2 32/66] target/arm: Remove env argument from combined_attrs_fwb Richard Henderson
2022-09-20 16:05   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 33/66] target/arm: Pass HCR to attribute subroutines Richard Henderson
2022-09-20 16:07   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 34/66] target/arm: Fix ATS12NSO* from S PL1 Richard Henderson
2022-09-20 16:09   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 35/66] target/arm: Split out get_phys_addr_disabled Richard Henderson
2022-09-20 16:11   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 36/66] target/arm: Reorg get_phys_addr_disabled Richard Henderson
2022-09-20 16:21   ` Peter Maydell
2022-08-22 15:27 ` [PATCH v2 37/66] accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull Richard Henderson [this message]
2022-08-22 15:27 ` [PATCH v2 38/66] accel/tcg: Drop addr member from SavedIOTLB Richard Henderson
2022-08-22 15:27 ` [PATCH v2 39/66] accel/tcg: Suppress auto-invalidate in probe_access_internal Richard Henderson
2022-08-22 15:27 ` [PATCH v2 40/66] accel/tcg: Introduce probe_access_full Richard Henderson
2022-08-22 15:27 ` [PATCH v2 41/66] accel/tcg: Introduce tlb_set_page_full Richard Henderson
2022-08-22 15:27 ` [PATCH v2 42/66] target/arm: Use tlb_set_page_full Richard Henderson
2022-08-22 15:27 ` [PATCH v2 43/66] include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA Richard Henderson
2022-08-22 15:27 ` [PATCH v2 44/66] target/arm: Enable TARGET_PAGE_ENTRY_EXTRA Richard Henderson
2022-08-22 15:27 ` [PATCH v2 45/66] target/arm: Use probe_access_full for MTE Richard Henderson
2022-08-22 15:27 ` [PATCH v2 46/66] target/arm: Use probe_access_full for BTI Richard Henderson
2022-08-22 15:27 ` [PATCH v2 47/66] include/exec: Remove target_tlb_bitN from MemTxAttrs Richard Henderson
2022-08-22 15:27 ` [PATCH v2 48/66] target/arm: Add ARMMMUIdx_Phys_{S,NS} Richard Henderson
2022-08-22 15:27 ` [PATCH v2 49/66] target/arm: Move ARMMMUIdx_Stage2 to a real tlb mmu_idx Richard Henderson
2022-08-22 15:27 ` [PATCH v2 50/66] target/arm: Use softmmu tlbs for page table walking Richard Henderson
2022-08-22 15:27 ` [PATCH v2 51/66] target/arm: Hoist check for disabled stage2 translation Richard Henderson
2022-08-22 15:27 ` [PATCH v2 52/66] target/arm: Split out get_phys_addr_twostage Richard Henderson
2022-08-22 15:27 ` [PATCH v2 53/66] target/arm: Use bool consistently for get_phys_addr subroutines Richard Henderson
2022-08-22 15:27 ` [PATCH v2 54/66] target/arm: Only use ARMMMUIdx_Stage1* for two-stage translation Richard Henderson
2022-08-22 15:27 ` [PATCH v2 55/66] target/arm: Add ptw_idx argument to S1_ptw_translate Richard Henderson
2022-08-22 15:27 ` [PATCH v2 56/66] target/arm: Add isar predicates for FEAT_HAFDBS Richard Henderson
2022-08-22 15:27 ` [PATCH v2 57/66] target/arm: Extract HA and HD in aa64_va_parameters Richard Henderson
2022-08-22 15:27 ` [PATCH v2 58/66] target/arm: Split out S1TranslateResult type Richard Henderson
2022-08-22 15:27 ` [PATCH v2 59/66] target/arm: Move be test for regime into S1TranslateResult Richard Henderson
2022-08-22 15:27 ` [PATCH v2 60/66] target/arm: Move S1_ptw_translate outside arm_ld[lq]_ptw Richard Henderson
2022-08-22 15:27 ` [PATCH v2 61/66] target/arm: Add ARMFault_UnsuppAtomicUpdate Richard Henderson
2022-08-22 15:27 ` [PATCH v2 62/66] target/arm: Remove loop from get_phys_addr_lpae Richard Henderson
2022-08-22 15:27 ` [PATCH v2 63/66] target/arm: Fix fault reporting in get_phys_addr_lpae Richard Henderson
2022-08-22 15:27 ` [PATCH v2 64/66] target/arm: Don't shift attrs in get_phys_addr_lpae Richard Henderson
2022-08-22 15:27 ` [PATCH v2 65/66] target/arm: Consider GP an attribute in get_phys_addr_lpae Richard Henderson
2022-08-22 15:27 ` [PATCH v2 66/66] target/arm: Implement FEAT_HAFDBS Richard Henderson
2022-09-22 10:56 ` [PATCH v2 00/66] target/arm: Implement FEAT_HAFDBS Peter Maydell
