* [PATCH v3 0/6] tcg: Introduce CPUTLBEntryFull
From: Richard Henderson @ 2022-09-05 20:22 UTC
To: qemu-devel; +Cc: f4bug, qemu-arm, pbonzini
This is split out of two patch sets I have in flight
that allow atomic updates of guest page tables.
v3 fixes some trivial conflicts with the current tcg-next PR:
https://patchew.org/QEMU/20220904002317.60158-1-richard.henderson@linaro.org/
r~
Richard Henderson (6):
accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
accel/tcg: Drop addr member from SavedIOTLB
accel/tcg: Suppress auto-invalidate in probe_access_internal
accel/tcg: Introduce probe_access_full
accel/tcg: Introduce tlb_set_page_full
include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
include/exec/cpu-defs.h | 45 +++++--
include/exec/exec-all.h | 33 +++++
include/hw/core/cpu.h | 1 -
accel/tcg/cputlb.c | 218 ++++++++++++++++++++--------------
target/arm/mte_helper.c | 14 +--
target/arm/sve_helper.c | 4 +-
target/arm/translate-a64.c | 2 +-
target/s390x/tcg/mem_helper.c | 4 -
8 files changed, 207 insertions(+), 114 deletions(-)
--
2.34.1
* [PATCH v3 1/6] accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
From: Richard Henderson @ 2022-09-05 20:22 UTC
To: qemu-devel; +Cc: f4bug, qemu-arm, pbonzini
This structure will shortly contain more than just
data for accessing MMIO. Rename the 'addr' member
to 'xlat_section' to more clearly indicate its purpose.
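As a self-contained sketch of the packing that the @xlat_section comment
below documents (every name and constant here is a stand-in invented for
the demo, not QEMU's own code):

/* The low TARGET_PAGE_BITS of one hwaddr-sized word hold a physical
 * section number; the remaining bits hold an offset that io_readx()
 * and io_writex() add back to the virtual address. */
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t hwaddr;
#define TARGET_PAGE_BITS 12                  /* assumed for the demo */
#define TARGET_PAGE_MASK ((hwaddr)-1 << TARGET_PAGE_BITS)

static hwaddr pack_xlat(unsigned section, hwaddr xlat_offset)
{
    return (xlat_offset & TARGET_PAGE_MASK) | section;
}

int main(void)
{
    hwaddr xlat_section = pack_xlat(5, 0x12340000);
    hwaddr addr = 0xabc;                     /* offset within the page */

    printf("section index = %u\n",
           (unsigned)(xlat_section & ~TARGET_PAGE_MASK));
    printf("mr_offset     = 0x%" PRIx64 "\n",
           (xlat_section & TARGET_PAGE_MASK) + addr);
    return 0;
}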
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-defs.h | 22 ++++----
accel/tcg/cputlb.c | 102 +++++++++++++++++++------------------
target/arm/mte_helper.c | 14 ++---
target/arm/sve_helper.c | 4 +-
target/arm/translate-a64.c | 2 +-
5 files changed, 73 insertions(+), 71 deletions(-)
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index ba3cd32a1e..f70f54d850 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -108,6 +108,7 @@ typedef uint64_t target_ulong;
# endif
# endif
+/* Minimalized TLB entry for use by TCG fast path. */
typedef struct CPUTLBEntry {
/* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
@@ -131,14 +132,14 @@ typedef struct CPUTLBEntry {
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
-/* The IOTLB is not accessed directly inline by generated TCG code,
- * so the CPUIOTLBEntry layout is not as critical as that of the
- * CPUTLBEntry. (This is also why we don't want to combine the two
- * structs into one.)
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry. This is
+ * also why we don't want to combine the two structs.
*/
-typedef struct CPUIOTLBEntry {
+typedef struct CPUTLBEntryFull {
/*
- * @addr contains:
+ * @xlat_section contains:
* - in the lower TARGET_PAGE_BITS, a physical section number
* - with the lower TARGET_PAGE_BITS masked off, an offset which
* must be added to the virtual address to obtain:
@@ -146,9 +147,9 @@ typedef struct CPUIOTLBEntry {
* number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
* + the offset within the target MemoryRegion (otherwise)
*/
- hwaddr addr;
+ hwaddr xlat_section;
MemTxAttrs attrs;
-} CPUIOTLBEntry;
+} CPUTLBEntryFull;
/*
* Data elements that are per MMU mode, minus the bits accessed by
@@ -172,9 +173,8 @@ typedef struct CPUTLBDesc {
size_t vindex;
/* The tlb victim table, in two parts. */
CPUTLBEntry vtable[CPU_VTLB_SIZE];
- CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
- /* The iotlb. */
- CPUIOTLBEntry *iotlb;
+ CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
+ CPUTLBEntryFull *fulltlb;
} CPUTLBDesc;
/*
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 8fad2d9b83..4585d7c015 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -200,13 +200,13 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
}
g_free(fast->table);
- g_free(desc->iotlb);
+ g_free(desc->fulltlb);
tlb_window_reset(desc, now, 0);
/* desc->n_used_entries is cleared by the caller */
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
fast->table = g_try_new(CPUTLBEntry, new_size);
- desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+ desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
/*
* If the allocations fail, try smaller sizes. We just freed some
@@ -215,7 +215,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
* allocations to fail though, so we progressively reduce the allocation
* size, aborting if we cannot even allocate the smallest TLB we support.
*/
- while (fast->table == NULL || desc->iotlb == NULL) {
+ while (fast->table == NULL || desc->fulltlb == NULL) {
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
error_report("%s: %s", __func__, strerror(errno));
abort();
@@ -224,9 +224,9 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
g_free(fast->table);
- g_free(desc->iotlb);
+ g_free(desc->fulltlb);
fast->table = g_try_new(CPUTLBEntry, new_size);
- desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+ desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
}
}
@@ -258,7 +258,7 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
desc->n_used_entries = 0;
fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
fast->table = g_new(CPUTLBEntry, n_entries);
- desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+ desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
tlb_mmu_flush_locked(desc, fast);
}
@@ -299,7 +299,7 @@ void tlb_destroy(CPUState *cpu)
CPUTLBDescFast *fast = &env_tlb(env)->f[i];
g_free(fast->table);
- g_free(desc->iotlb);
+ g_free(desc->fulltlb);
}
}
@@ -1219,7 +1219,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
/* Evict the old entry into the victim tlb. */
copy_tlb_helper_locked(tv, te);
- desc->viotlb[vidx] = desc->iotlb[index];
+ desc->vfulltlb[vidx] = desc->fulltlb[index];
tlb_n_used_entries_dec(env, mmu_idx);
}
@@ -1236,8 +1236,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
* subtract here is that of the page base, and not the same as the
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/
- desc->iotlb[index].addr = iotlb - vaddr_page;
- desc->iotlb[index].attrs = attrs;
+ desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
+ desc->fulltlb[index].attrs = attrs;
/* Now calculate the new entry */
tn.addend = addend - vaddr_page;
@@ -1329,7 +1329,7 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
}
}
-static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
{
@@ -1341,9 +1341,9 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+ section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
mr = section->mr;
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+ mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
if (!cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr);
@@ -1353,14 +1353,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
qemu_mutex_lock_iothread();
locked = true;
}
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
+ r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset +
section->offset_within_address_space -
section->offset_within_region;
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
- mmu_idx, iotlbentry->attrs, r, retaddr);
+ mmu_idx, full->attrs, r, retaddr);
}
if (locked) {
qemu_mutex_unlock_iothread();
@@ -1370,8 +1370,8 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
/*
- * Save a potentially trashed IOTLB entry for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
+ * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
+ * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
* because of the side effect of io_writex changing memory layout.
*/
static void save_iotlb_data(CPUState *cs, hwaddr addr,
@@ -1385,7 +1385,7 @@ static void save_iotlb_data(CPUState *cs, hwaddr addr,
#endif
}
-static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, uint64_t val, target_ulong addr,
uintptr_t retaddr, MemOp op)
{
@@ -1396,9 +1396,9 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+ section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
mr = section->mr;
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+ mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
if (!cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
@@ -1408,20 +1408,20 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
* The memory_region_dispatch may trigger a flush/resize
* so for plugins we save the iotlb_data just in case.
*/
- save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
+ save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
if (!qemu_mutex_iothread_locked()) {
qemu_mutex_lock_iothread();
locked = true;
}
- r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
+ r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset +
section->offset_within_address_space -
section->offset_within_region;
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
- MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
+ MMU_DATA_STORE, mmu_idx, full->attrs, r,
retaddr);
}
if (locked) {
@@ -1468,9 +1468,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
copy_tlb_helper_locked(vtlb, &tmptlb);
qemu_spin_unlock(&env_tlb(env)->c.lock);
- CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
- CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
- tmpio = *io; *io = *vio; *vio = tmpio;
+ CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
+ CPUTLBEntryFull tmpf;
+ tmpf = *f1; *f1 = *f2; *f2 = tmpf;
return true;
}
}
@@ -1483,9 +1484,9 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
(ADDR) & TARGET_PAGE_MASK)
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
- CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
+ CPUTLBEntryFull *full, uintptr_t retaddr)
{
- ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
+ ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
@@ -1578,9 +1579,9 @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+ notdirty_write(env_cpu(env), addr, 1, full, retaddr);
flags &= ~TLB_NOTDIRTY;
}
@@ -1605,19 +1606,19 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
/* Handle watchpoints. */
if (flags & TLB_WATCHPOINT) {
int wp_access = (access_type == MMU_DATA_STORE
? BP_MEM_WRITE : BP_MEM_READ);
cpu_check_watchpoint(env_cpu(env), addr, size,
- iotlbentry->attrs, wp_access, retaddr);
+ full->attrs, wp_access, retaddr);
}
/* Handle clean RAM pages. */
if (flags & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+ notdirty_write(env_cpu(env), addr, 1, full, retaddr);
}
}
@@ -1674,7 +1675,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
* should have just filled the TLB. The one corner case is io_writex
* which can cause TLB flushes and potential resizing of the TLBs
* losing the information we need. In those cases we need to recover
- * data from a copy of the iotlbentry. As long as this always occurs
+ * data from a copy of the CPUTLBEntryFull. As long as this always occurs
* from the same thread (which a mem callback will be) this is safe.
*/
@@ -1689,11 +1690,12 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
if (likely(tlb_hit(tlb_addr, addr))) {
/* We must have an iotlb entry for MMIO */
if (tlb_addr & TLB_MMIO) {
- CPUIOTLBEntry *iotlbentry;
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+ CPUTLBEntryFull *full;
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
data->is_io = true;
- data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
- data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+ data->v.io.section =
+ iotlb_to_section(cpu, full->xlat_section, full->attrs);
+ data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
} else {
data->is_io = false;
data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
@@ -1801,7 +1803,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
notdirty_write(env_cpu(env), addr, size,
- &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
+ &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
}
return hostaddr;
@@ -1909,7 +1911,7 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- CPUIOTLBEntry *iotlbentry;
+ CPUTLBEntryFull *full;
bool need_swap;
/* For anything that is unaligned, recurse through full_load. */
@@ -1917,20 +1919,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
goto do_unaligned_access;
}
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
- iotlbentry->attrs, BP_MEM_READ, retaddr);
+ full->attrs, BP_MEM_READ, retaddr);
}
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
/* Handle I/O access. */
if (likely(tlb_addr & TLB_MMIO)) {
- return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+ return io_readx(env, full, mmu_idx, addr, retaddr,
access_type, op ^ (need_swap * MO_BSWAP));
}
@@ -2245,12 +2247,12 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
*/
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), addr, size - size2,
- env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
+ env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
BP_MEM_WRITE, retaddr);
}
if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), page2, size2,
- env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
+ env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
BP_MEM_WRITE, retaddr);
}
@@ -2314,7 +2316,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- CPUIOTLBEntry *iotlbentry;
+ CPUTLBEntryFull *full;
bool need_swap;
/* For anything that is unaligned, recurse through byte stores. */
@@ -2322,20 +2324,20 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
goto do_unaligned_access;
}
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
- iotlbentry->attrs, BP_MEM_WRITE, retaddr);
+ full->attrs, BP_MEM_WRITE, retaddr);
}
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
/* Handle I/O access. */
if (tlb_addr & TLB_MMIO) {
- io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+ io_writex(env, full, mmu_idx, val, addr, retaddr,
op ^ (need_swap * MO_BSWAP));
return;
}
@@ -2347,7 +2349,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Handle clean RAM pages. */
if (tlb_addr & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+ notdirty_write(env_cpu(env), addr, size, full, retaddr);
}
haddr = (void *)((uintptr_t)addr + entry->addend);
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index d11a8c70d0..fdd23ab3f8 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -106,7 +106,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
return tags + index;
#else
uintptr_t index;
- CPUIOTLBEntry *iotlbentry;
+ CPUTLBEntryFull *full;
int in_page, flags;
ram_addr_t ptr_ra;
hwaddr ptr_paddr, tag_paddr, xlat;
@@ -129,7 +129,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
assert(!(flags & TLB_INVALID_MASK));
/*
- * Find the iotlbentry for ptr. This *must* be present in the TLB
+ * Find the CPUTLBEntryFull for ptr. This *must* be present in the TLB
* because we just found the mapping.
* TODO: Perhaps there should be a cputlb helper that returns a
* matching tlb entry + iotlb entry.
@@ -144,10 +144,10 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
g_assert(tlb_hit(comparator, ptr));
}
# endif
- iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];
+ full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
/* If the virtual page MemAttr != Tagged, access unchecked. */
- if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
+ if (!arm_tlb_mte_tagged(&full->attrs)) {
return NULL;
}
@@ -181,7 +181,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
assert(ra != 0);
cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
- iotlbentry->attrs, wp, ra);
+ full->attrs, wp, ra);
}
/*
@@ -202,11 +202,11 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
/* Look up the address in tag space. */
- tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+ tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
tag_access == MMU_DATA_STORE,
- iotlbentry->attrs);
+ full->attrs);
/*
* Note that @mr will never be NULL. If there is nothing in the address
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index d6f7ef94fe..9cae8fd352 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -5384,8 +5384,8 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
g_assert(tlb_hit(comparator, addr));
# endif
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
- info->attrs = iotlbentry->attrs;
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ info->attrs = full->attrs;
}
#endif
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 163df8c615..b7787e7786 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14634,7 +14634,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
* table entry even for that case.
*/
return (tlb_hit(entry->addr_code, addr) &&
- arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
+ arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
#endif
}
--
2.34.1
* [PATCH v3 2/6] accel/tcg: Drop addr member from SavedIOTLB
From: Richard Henderson @ 2022-09-05 20:22 UTC
To: qemu-devel; +Cc: f4bug, qemu-arm, pbonzini
This field is only written, not read; remove it.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 1 -
accel/tcg/cputlb.c | 7 +++----
2 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 500503da13..9e47184513 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -218,7 +218,6 @@ struct CPUWatchpoint {
* the memory regions get moved around by io_writex.
*/
typedef struct SavedIOTLB {
- hwaddr addr;
MemoryRegionSection *section;
hwaddr mr_offset;
} SavedIOTLB;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 4585d7c015..03395e725d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1374,12 +1374,11 @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
* This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
* because of the side effect of io_writex changing memory layout.
*/
-static void save_iotlb_data(CPUState *cs, hwaddr addr,
- MemoryRegionSection *section, hwaddr mr_offset)
+static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
+ hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
SavedIOTLB *saved = &cs->saved_iotlb;
- saved->addr = addr;
saved->section = section;
saved->mr_offset = mr_offset;
#endif
@@ -1408,7 +1407,7 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
* The memory_region_dispatch may trigger a flush/resize
* so for plugins we save the iotlb_data just in case.
*/
- save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
+ save_iotlb_data(cpu, section, mr_offset);
if (!qemu_mutex_iothread_locked()) {
qemu_mutex_lock_iothread();
--
2.34.1
* [PATCH v3 3/6] accel/tcg: Suppress auto-invalidate in probe_access_internal
From: Richard Henderson @ 2022-09-05 20:22 UTC
To: qemu-devel; +Cc: f4bug, qemu-arm, pbonzini, David Hildenbrand
When PAGE_WRITE_INV is set in a call to tlb_set_page,
we immediately set TLB_INVALID_MASK in order to force
tlb_fill to be called on the next lookup. Here in
probe_access_internal, we have just called tlb_fill
and eliminated true misses, so the lookup must be valid.
This allows us to remove a warning comment from s390x.
There doesn't seem to be a reason to change the code though.
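Restated as a hedged sketch of the resulting control flow (the bit
values and the refetch helper are placeholders, not the real cputlb
code):

#include <stdbool.h>
#include <stdint.h>

#define TLB_INVALID_MASK (1u << 0)      /* placeholder bit layout */
#define TLB_FLAGS_MASK   0x3fu

/* tlb_fill_and_refetch() stands in for tlb_fill() plus re-reading the
 * (possibly moved) entry's address word. */
static int probe_flags(uint64_t tlb_addr, bool hit,
                       uint64_t (*tlb_fill_and_refetch)(void))
{
    int flags = TLB_FLAGS_MASK;         /* start with every flag bit allowed */

    if (!hit) {
        tlb_addr = tlb_fill_and_refetch();
        /* tlb_fill just validated this entry, so suppress the
         * TLB_INVALID_MASK that PAGE_WRITE_INV may have left set. */
        flags &= ~TLB_INVALID_MASK;
    }
    return flags & tlb_addr;            /* keep only the bits actually set */
}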
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 10 +++++++++-
target/s390x/tcg/mem_helper.c | 4 ----
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 03395e725d..91f2b53142 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1535,6 +1535,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
}
tlb_addr = tlb_read_ofs(entry, elt_ofs);
+ flags = TLB_FLAGS_MASK;
page_addr = addr & TARGET_PAGE_MASK;
if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
@@ -1550,10 +1551,17 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
/* TLB resize via tlb_fill may have moved the entry. */
entry = tlb_entry(env, mmu_idx, addr);
+
+ /*
+ * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
+ * to force the next access through tlb_fill. We've just
+ * called tlb_fill, so we know that this entry *is* valid.
+ */
+ flags &= ~TLB_INVALID_MASK;
}
tlb_addr = tlb_read_ofs(entry, elt_ofs);
}
- flags = tlb_addr & TLB_FLAGS_MASK;
+ flags &= tlb_addr;
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index fc52aa128b..3758b9e688 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -148,10 +148,6 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
#else
int flags;
- /*
- * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
- * to detect if there was an exception during tlb_fill().
- */
env->tlb_fill_exc = 0;
flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
ra);
--
2.34.1
* [PATCH v3 4/6] accel/tcg: Introduce probe_access_full
From: Richard Henderson @ 2022-09-05 20:22 UTC
To: qemu-devel; +Cc: f4bug, qemu-arm, pbonzini
Add an interface to return the CPUTLBEntryFull struct
that goes with the lookup. The result is not intended
to be valid across multiple lookups, so the caller must
consume the result immediately.
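A hypothetical caller shape against the interface added below; the
helper and its arguments are invented for illustration, and only
probe_access_full() itself comes from this patch:

/* Probe, then consume *full before anything that can refill or resize
 * the TLB. With nonfault=false, a translation failure raises the
 * exception inside tlb_fill, so @full is valid whenever we return. */
static MemTxAttrs probe_attrs(CPUArchState *env, target_ulong addr,
                              int mmu_idx, uintptr_t ra)
{
    CPUTLBEntryFull *full;
    void *host;

    (void)probe_access_full(env, addr, MMU_DATA_LOAD, mmu_idx,
                            false, &host, &full, ra);
    return full->attrs;                 /* copy out what is needed, now */
}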
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 11 ++++++++++
accel/tcg/cputlb.c | 47 +++++++++++++++++++++++++----------------
2 files changed, 40 insertions(+), 18 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bcad607c4e..758cf6bcc7 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -434,6 +434,17 @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr);
+#ifndef CONFIG_USER_ONLY
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ */
+int probe_access_full(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost,
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
+#endif
+
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
/* Estimated block size for TB allocation. */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 91f2b53142..62159549f6 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1512,7 +1512,8 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
static int probe_access_internal(CPUArchState *env, target_ulong addr,
int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
- void **phost, uintptr_t retaddr)
+ void **phost, CPUTLBEntryFull **pfull,
+ uintptr_t retaddr)
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
@@ -1546,10 +1547,12 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
+ *pfull = NULL;
return TLB_INVALID_MASK;
}
/* TLB resize via tlb_fill may have moved the entry. */
+ index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
/*
@@ -1563,6 +1566,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
}
flags &= tlb_addr;
+ *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
*phost = NULL;
@@ -1574,37 +1579,44 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
return flags;
}
-int probe_access_flags(CPUArchState *env, target_ulong addr,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, uintptr_t retaddr)
+int probe_access_full(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, CPUTLBEntryFull **pfull,
+ uintptr_t retaddr)
{
- int flags;
-
- flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
- nonfault, phost, retaddr);
+ int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
+ nonfault, phost, pfull, retaddr);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
}
return flags;
}
+int probe_access_flags(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr)
+{
+ CPUTLBEntryFull *full;
+
+ return probe_access_full(env, addr, access_type, mmu_idx,
+ nonfault, phost, &full, retaddr);
+}
+
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
+ CPUTLBEntryFull *full;
void *host;
int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- false, &host, retaddr);
+ false, &host, &full, retaddr);
/* Per the interface, size == 0 merely faults the access. */
if (size == 0) {
@@ -1612,9 +1624,6 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
}
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
/* Handle watchpoints. */
if (flags & TLB_WATCHPOINT) {
int wp_access = (access_type == MMU_DATA_STORE
@@ -1635,11 +1644,12 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
+ CPUTLBEntryFull *full;
void *host;
int flags;
flags = probe_access_internal(env, addr, 0, access_type,
- mmu_idx, true, &host, 0);
+ mmu_idx, true, &host, &full, 0);
/* No combination of flags are expected by the caller. */
return flags ? NULL : host;
@@ -1658,10 +1668,11 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
+ CPUTLBEntryFull *full;
void *p;
(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
- cpu_mmu_index(env, true), false, &p, 0);
+ cpu_mmu_index(env, true), false, &p, &full, 0);
if (p == NULL) {
return -1;
}
--
2.34.1
* [PATCH v3 5/6] accel/tcg: Introduce tlb_set_page_full
From: Richard Henderson @ 2022-09-05 20:22 UTC
To: qemu-devel; +Cc: f4bug, qemu-arm, pbonzini
Now that we have collected all of the page data into
CPUTLBEntryFull, provide an interface to record it all
in one go, instead of passing four separate arguments. This
interface allows CPUTLBEntryFull to be extended without
changing the number of arguments.
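As a usage sketch (every field value below is a placeholder, not
something a target in this series actually sets), the tail of a
target's tlb_fill becomes one struct plus one call:

CPUTLBEntryFull full = {
    .phys_addr    = paddr,                  /* from the page table walk */
    .attrs        = MEMTXATTRS_UNSPECIFIED, /* or walker-derived attrs */
    .prot         = PAGE_READ | PAGE_WRITE,
    .lg_page_size = TARGET_PAGE_BITS,
};

/* xlat_section is computed by tlb_set_page_full() itself. */
tlb_set_page_full(cs, mmu_idx, vaddr, &full);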
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-defs.h | 14 ++++++++++
include/exec/exec-all.h | 22 +++++++++++++++
accel/tcg/cputlb.c | 62 ++++++++++++++++++++++++++++-------------
3 files changed, 78 insertions(+), 20 deletions(-)
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index f70f54d850..5e12cc1854 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -148,7 +148,21 @@ typedef struct CPUTLBEntryFull {
* + the offset within the target MemoryRegion (otherwise)
*/
hwaddr xlat_section;
+
+ /*
+ * @phys_addr contains the physical address in the address space
+ * given by cpu_asidx_from_attrs(cpu, @attrs).
+ */
+ hwaddr phys_addr;
+
+ /* @attrs contains the memory transaction attributes for the page. */
MemTxAttrs attrs;
+
+ /* @prot contains the complete protections for the page. */
+ uint8_t prot;
+
+ /* @lg_page_size contains the log2 of the page size. */
+ uint8_t lg_page_size;
} CPUTLBEntryFull;
/*
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 758cf6bcc7..1a30c857f4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -257,6 +257,28 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap,
unsigned bits);
+/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @vaddr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->ld_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
+ CPUTLBEntryFull *full);
+
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 62159549f6..3a3549ad4a 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1095,16 +1095,16 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
-/* Add a new TLB entry. At most one entry for a given virtual address
+/*
+ * Add a new TLB entry. At most one entry for a given virtual address
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
* supplied size is only used by tlb_flush_page.
*
* Called from TCG-generated code, which is under an RCU read-side
* critical section.
*/
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
- hwaddr paddr, MemTxAttrs attrs, int prot,
- int mmu_idx, target_ulong size)
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
+ target_ulong vaddr, CPUTLBEntryFull *full)
{
CPUArchState *env = cpu->env_ptr;
CPUTLB *tlb = env_tlb(env);
@@ -1117,35 +1117,36 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
CPUTLBEntry *te, tn;
hwaddr iotlb, xlat, sz, paddr_page;
target_ulong vaddr_page;
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
- int wp_flags;
+ int asidx, wp_flags, prot;
bool is_ram, is_romd;
assert_cpu_is_self(cpu);
- if (size <= TARGET_PAGE_SIZE) {
+ if (full->lg_page_size <= TARGET_PAGE_BITS) {
sz = TARGET_PAGE_SIZE;
} else {
- tlb_add_large_page(env, mmu_idx, vaddr, size);
- sz = size;
+ sz = (hwaddr)1 << full->lg_page_size;
+ tlb_add_large_page(env, mmu_idx, vaddr, sz);
}
vaddr_page = vaddr & TARGET_PAGE_MASK;
- paddr_page = paddr & TARGET_PAGE_MASK;
+ paddr_page = full->phys_addr & TARGET_PAGE_MASK;
+ prot = full->prot;
+ asidx = cpu_asidx_from_attrs(cpu, full->attrs);
section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
- &xlat, &sz, attrs, &prot);
+ &xlat, &sz, full->attrs, &prot);
assert(sz >= TARGET_PAGE_SIZE);
tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
" prot=%x idx=%d\n",
- vaddr, paddr, prot, mmu_idx);
+ vaddr, full->phys_addr, prot, mmu_idx);
address = vaddr_page;
- if (size < TARGET_PAGE_SIZE) {
+ if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */
address |= TLB_INVALID_MASK;
}
- if (attrs.byte_swap) {
+ if (full->attrs.byte_swap) {
address |= TLB_BSWAP;
}
@@ -1236,8 +1237,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
* subtract here is that of the page base, and not the same as the
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/
+ desc->fulltlb[index] = *full;
desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
- desc->fulltlb[index].attrs = attrs;
+ desc->fulltlb[index].phys_addr = paddr_page;
+ desc->fulltlb[index].prot = prot;
/* Now calculate the new entry */
tn.addend = addend - vaddr_page;
@@ -1272,15 +1275,34 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
qemu_spin_unlock(&tlb->c.lock);
}
-/* Add a new TLB entry, but without specifying the memory
- * transaction attributes to be used.
- */
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, MemTxAttrs attrs, int prot,
+ int mmu_idx, target_ulong size)
+{
+ CPUTLBEntryFull full = {
+ .phys_addr = paddr,
+ .attrs = attrs,
+ .prot = prot,
+ .lg_page_size = ctz64(size)
+ };
+
+ assert(is_power_of_2(size));
+ tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+}
+
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size)
{
- tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
- prot, mmu_idx, size);
+ CPUTLBEntryFull full = {
+ .phys_addr = paddr,
+ .attrs = MEMTXATTRS_UNSPECIFIED,
+ .prot = prot,
+ .lg_page_size = ctz64(size)
+ };
+
+ assert(is_power_of_2(size));
+ tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
}
/*
--
2.34.1
* [PATCH v3 6/6] include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
From: Richard Henderson @ 2022-09-05 20:22 UTC
To: qemu-devel; +Cc: f4bug, qemu-arm, pbonzini
Allow the target to cache items from the guest page tables.
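As an illustration of the mechanism (the field shown is hypothetical;
this patch adds no target definition), a target would define the macro
in its cpu-param.h, which is included ahead of cpu-defs.h:

/* Hypothetical target/foo/cpu-param.h fragment */
#define TARGET_PAGE_ENTRY_EXTRA \
    uint64_t pte;               /* guest PTE cached at fill time */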
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-defs.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 5e12cc1854..67239b4e5e 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -163,6 +163,15 @@ typedef struct CPUTLBEntryFull {
/* @lg_page_size contains the log2 of the page size. */
uint8_t lg_page_size;
+
+ /*
+ * Allow target-specific additions to this structure.
+ * This may be used to cache items from the guest cpu
+ * page tables for later use by the implementation.
+ */
+#ifdef TARGET_PAGE_ENTRY_EXTRA
+ TARGET_PAGE_ENTRY_EXTRA
+#endif
} CPUTLBEntryFull;
/*
--
2.34.1
* Re: [PATCH v3 1/6] accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
From: Philippe Mathieu-Daudé via @ 2022-09-05 21:07 UTC
To: Richard Henderson, qemu-devel; +Cc: qemu-arm, pbonzini
On 5/9/22 22:22, Richard Henderson wrote:
> This structure will shortly contain more than just
> data for accessing MMIO. Rename the 'addr' member
> to 'xlat_section' to more clearly indicate its purpose.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/cpu-defs.h | 22 ++++----
> accel/tcg/cputlb.c | 102 +++++++++++++++++++------------------
> target/arm/mte_helper.c | 14 ++---
> target/arm/sve_helper.c | 4 +-
> target/arm/translate-a64.c | 2 +-
> 5 files changed, 73 insertions(+), 71 deletions(-)
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
* Re: [PATCH v3 2/6] accel/tcg: Drop addr member from SavedIOTLB
From: Philippe Mathieu-Daudé via @ 2022-09-05 21:07 UTC
To: Richard Henderson, qemu-devel; +Cc: qemu-arm, pbonzini
On 5/9/22 22:22, Richard Henderson wrote:
> This field is only written, not read; remove it.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/hw/core/cpu.h | 1 -
> accel/tcg/cputlb.c | 7 +++----
> 2 files changed, 3 insertions(+), 5 deletions(-)
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
* Re: [PATCH v3 4/6] accel/tcg: Introduce probe_access_full
From: Philippe Mathieu-Daudé via @ 2022-09-05 21:10 UTC
To: Richard Henderson, qemu-devel; +Cc: qemu-arm, pbonzini
On 5/9/22 22:22, Richard Henderson wrote:
> Add an interface to return the CPUTLBEntryFull struct
> that goes with the lookup. The result is not intended
> to be valid across multiple lookups, so the user must
> use the results immediately.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/exec-all.h | 11 ++++++++++
> accel/tcg/cputlb.c | 47 +++++++++++++++++++++++++----------------
> 2 files changed, 40 insertions(+), 18 deletions(-)
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
* Re: [PATCH v3 5/6] accel/tcg: Introduce tlb_set_page_full
From: Philippe Mathieu-Daudé via @ 2022-09-05 21:20 UTC
To: Richard Henderson, qemu-devel; +Cc: qemu-arm, pbonzini
On 5/9/22 22:22, Richard Henderson wrote:
> Now that we have collected all of the page data into
> CPUTLBEntryFull, provide an interface to record that
> all in one go, instead of using 4 arguments. This interface
> allows CPUTLBEntryFull to be extended without having to
> change the number of arguments.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/cpu-defs.h | 14 ++++++++++
> include/exec/exec-all.h | 22 +++++++++++++++
> accel/tcg/cputlb.c | 62 ++++++++++++++++++++++++++++-------------
> 3 files changed, 78 insertions(+), 20 deletions(-)
> @@ -1117,35 +1117,36 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
> CPUTLBEntry *te, tn;
> hwaddr iotlb, xlat, sz, paddr_page;
> target_ulong vaddr_page;
> - int asidx = cpu_asidx_from_attrs(cpu, attrs);
> - int wp_flags;
> + int asidx, wp_flags, prot;
> bool is_ram, is_romd;
>
> assert_cpu_is_self(cpu);
>
> - if (size <= TARGET_PAGE_SIZE) {
> + if (full->lg_page_size <= TARGET_PAGE_BITS) {
> sz = TARGET_PAGE_SIZE;
> } else {
> - tlb_add_large_page(env, mmu_idx, vaddr, size);
> - sz = size;
> + sz = (hwaddr)1 << full->lg_page_size;
Could use BIT_ULL() here.
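For reference, BIT_ULL(n) is the qemu/bitops.h shorthand for
1ULL << n, so the line would hypothetically read:

    sz = BIT_ULL(full->lg_page_size);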
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
* Re: [PATCH v3 6/6] include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
From: Philippe Mathieu-Daudé via @ 2022-09-05 21:28 UTC
To: Richard Henderson, qemu-devel; +Cc: qemu-arm, pbonzini
On 5/9/22 22:22, Richard Henderson wrote:
> Allow the target to cache items from the guest page tables.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/cpu-defs.h | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
> index 5e12cc1854..67239b4e5e 100644
> --- a/include/exec/cpu-defs.h
> +++ b/include/exec/cpu-defs.h
> @@ -163,6 +163,15 @@ typedef struct CPUTLBEntryFull {
>
> /* @lg_page_size contains the log2 of the page size. */
> uint8_t lg_page_size;
> +
> + /*
> + * Allow target-specific additions to this structure.
> + * This may be used to cache items from the guest cpu
> + * page tables for later use by the implementation.
> + */
> +#ifdef TARGET_PAGE_ENTRY_EXTRA
> + TARGET_PAGE_ENTRY_EXTRA
> +#endif
> } CPUTLBEntryFull;
Alternatively declare a per-target structure in cpu-param.h
and here:
typedef struct CPUTLBEntryTarget CPUTLBEntryTarget;
#ifndef TARGET_HAS_PAGE_ENTRY_EXTRA_STRUCT
struct CPUTLBEntryTarget { };
#endif
typedef struct CPUTLBEntryFull {
...
CPUTLBEntryTarget target_extra;
} CPUTLBEntryFull;
Meanwhile:
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
* Re: [PATCH v3 6/6] include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
From: Richard Henderson @ 2022-09-06 7:42 UTC
To: Philippe Mathieu-Daudé, qemu-devel; +Cc: qemu-arm, pbonzini
On 9/5/22 22:28, Philippe Mathieu-Daudé wrote:
>> + /*
>> + * Allow target-specific additions to this structure.
>> + * This may be used to cache items from the guest cpu
>> + * page tables for later use by the implementation.
>> + */
>> +#ifdef TARGET_PAGE_ENTRY_EXTRA
>> + TARGET_PAGE_ENTRY_EXTRA
>> +#endif
>> } CPUTLBEntryFull;
>
> Alternatively declare a per-target structure in cpu-param.h
> and here:
>
> typedef struct CPUTLBEntryTarget CPUTLBEntryTarget;
>
> #ifndef TARGET_HAS_PAGE_ENTRY_EXTRA_STRUCT
> struct CPUTLBEntryTarget { }
> #endif
This was v1, more or less,
https://lore.kernel.org/qemu-devel/20220703082419.770989-2-richard.henderson@linaro.org/
In the end I thought the "extra" part confusing.
I was also concerned that I might be introducing
extra structure padding unnecessarily.
r~
* Re: [PATCH v3 1/6] accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
From: Peter Maydell @ 2022-09-20 10:42 UTC
To: Richard Henderson; +Cc: qemu-devel, f4bug, qemu-arm, pbonzini
On Mon, 5 Sept 2022 at 21:23, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> This structure will shortly contain more than just
> data for accessing MMIO. Rename the 'addr' member
> to 'xlat_section' to more clearly indicate its purpose.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
thanks
-- PMM
* Re: [PATCH v3 2/6] accel/tcg: Drop addr member from SavedIOTLB
From: Peter Maydell @ 2022-09-20 10:43 UTC
To: Richard Henderson; +Cc: qemu-devel, f4bug, qemu-arm, pbonzini
On Mon, 5 Sept 2022 at 21:27, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> This field is only written, not read; remove it.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
thanks
-- PMM
* Re: [PATCH v3 4/6] accel/tcg: Introduce probe_access_full
From: Peter Maydell @ 2022-09-20 10:46 UTC
To: Richard Henderson; +Cc: qemu-devel, f4bug, qemu-arm, pbonzini
On Mon, 5 Sept 2022 at 21:26, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Add an interface to return the CPUTLBEntryFull struct
> that goes with the lookup. The result is not intended
> to be valid across multiple lookups, so the user must
> use the results immediately.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
thanks
-- PMM
* Re: [PATCH v3 5/6] accel/tcg: Introduce tlb_set_page_full
From: Peter Maydell @ 2022-09-20 10:59 UTC
To: Richard Henderson; +Cc: qemu-devel, f4bug, qemu-arm, pbonzini
On Mon, 5 Sept 2022 at 21:29, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Now that we have collected all of the page data into
> CPUTLBEntryFull, provide an interface to record that
> all in one go, instead of using 4 arguments. This interface
> allows CPUTLBEntryFull to be extended without having to
> change the number of arguments.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/cpu-defs.h | 14 ++++++++++
> include/exec/exec-all.h | 22 +++++++++++++++
> accel/tcg/cputlb.c | 62 ++++++++++++++++++++++++++++-------------
> 3 files changed, 78 insertions(+), 20 deletions(-)
>
> diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
> index f70f54d850..5e12cc1854 100644
> --- a/include/exec/cpu-defs.h
> +++ b/include/exec/cpu-defs.h
> @@ -148,7 +148,21 @@ typedef struct CPUTLBEntryFull {
> * + the offset within the target MemoryRegion (otherwise)
> */
> hwaddr xlat_section;
> +
> + /*
> + * @phys_addr contains the physical address in the address space
> + * given by cpu_asidx_from_attrs(cpu, @attrs).
> + */
> + hwaddr phys_addr;
> +
> + /* @attrs contains the memory transaction attributes for the page. */
> MemTxAttrs attrs;
> +
> + /* @prot contains the complete protections for the page. */
> + uint8_t prot;
> +
> + /* @lg_page_size contains the log2 of the page size. */
> + uint8_t lg_page_size;
> } CPUTLBEntryFull;
>
> /*
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index 758cf6bcc7..1a30c857f4 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -257,6 +257,28 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
> uint16_t idxmap,
> unsigned bits);
>
> +/**
> + * tlb_set_page_full:
> + * @cpu: CPU context
> + * @mmu_idx: mmu index of the tlb to modify
> + * @vaddr: virtual address of the entry to add
> + * @full: the details of the tlb entry
> + *
> + * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
> + * @full must be filled, except for xlat_section, and constitute
> + * the complete description of the translated page.
> + *
> + * This is generally called by the target tlb_fill function after
> + * having performed a successful page table walk to find the physical
> + * address and attributes for the translation.
> + *
> + * At most one entry for a given virtual address is permitted. Only a
> + * single TARGET_PAGE_SIZE region is mapped; @full->ld_page_size is only
typo: lg_page_size
> + * used by tlb_flush_page.
> + */
> +void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
> + CPUTLBEntryFull *full);
> +
> /**
> * tlb_set_page_with_attrs:
> * @cpu: CPU to add this TLB entry for
> @@ -1272,15 +1275,34 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
> qemu_spin_unlock(&tlb->c.lock);
> }
>
> -/* Add a new TLB entry, but without specifying the memory
> - * transaction attributes to be used.
> - */
> +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
> + hwaddr paddr, MemTxAttrs attrs, int prot,
> + int mmu_idx, target_ulong size)
> +{
> + CPUTLBEntryFull full = {
> + .phys_addr = paddr,
> + .attrs = attrs,
> + .prot = prot,
> + .lg_page_size = ctz64(size)
> + };
> +
> + assert(is_power_of_2(size));
> + tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
> +}
> +
> void tlb_set_page(CPUState *cpu, target_ulong vaddr,
> hwaddr paddr, int prot,
> int mmu_idx, target_ulong size)
> {
> - tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
> - prot, mmu_idx, size);
> + CPUTLBEntryFull full = {
> + .phys_addr = paddr,
> + .attrs = MEMTXATTRS_UNSPECIFIED,
> + .prot = prot,
> + .lg_page_size = ctz64(size)
> + };
> +
> + assert(is_power_of_2(size));
> + tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
> }
Why not just leave tlb_set_page() the way it was? Writing
it out results in this code duplication...
Otherwise
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
thanks
-- PMM
* Re: [PATCH v3 6/6] include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
From: Peter Maydell @ 2022-09-20 10:59 UTC
To: Richard Henderson; +Cc: qemu-devel, f4bug, qemu-arm, pbonzini
On Mon, 5 Sept 2022 at 21:26, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Allow the target to cache items from the guest page tables.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/cpu-defs.h | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
> index 5e12cc1854..67239b4e5e 100644
> --- a/include/exec/cpu-defs.h
> +++ b/include/exec/cpu-defs.h
> @@ -163,6 +163,15 @@ typedef struct CPUTLBEntryFull {
>
> /* @lg_page_size contains the log2 of the page size. */
> uint8_t lg_page_size;
> +
> + /*
> + * Allow target-specific additions to this structure.
> + * This may be used to cache items from the guest cpu
> + * page tables for later use by the implementation.
> + */
> +#ifdef TARGET_PAGE_ENTRY_EXTRA
> + TARGET_PAGE_ENTRY_EXTRA
> +#endif
> } CPUTLBEntryFull;
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Shouldn't be too painful to adjust in future if we decide we don't
like doing it by macro magic, I guess.
thanks
-- PMM
* Re: [PATCH v3 3/6] accel/tcg: Suppress auto-invalidate in probe_access_internal
From: Peter Maydell @ 2022-09-20 11:21 UTC
To: Richard Henderson
Cc: qemu-devel, f4bug, qemu-arm, pbonzini, David Hildenbrand
On Mon, 5 Sept 2022 at 21:26, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> When PAGE_WRITE_INV is set when calling tlb_set_page,
> we immediately set TLB_INVALID_MASK in order to force
> tlb_fill to be called on the next lookup. Here in
> probe_access_internal, we have just called tlb_fill
> and eliminated true misses, thus the lookup must be valid.
>
> This allows us to remove a warning comment from s390x.
> There doesn't seem to be a reason to change the code though.
I looked at the s390 code and although it seems a bit awkward
to still have the action-at-a-distance env->tlb_fill_exc, I
couldn't think of an obviously neater way to do it. So
assuming the s390 folks are happy with this,
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
thanks
-- PMM