From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: deller@kernel.org, peter.maydell@linaro.org,
alex.bennee@linaro.org, linux-parisc@vger.kernel.org,
qemu-arm@nongnu.org
Subject: [PATCH v2 07/21] accel/tcg: Use the tlb_fill_align hook
Date: Sat, 5 Oct 2024 13:05:46 -0700
Message-ID: <20241005200600.493604-8-richard.henderson@linaro.org>
In-Reply-To: <20241005200600.493604-1-richard.henderson@linaro.org>
When we have a tlb miss, defer the alignment check to
the new tlb_fill_align hook. Move the existing alignment
check so that we only perform it on a tlb hit.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 89 +++++++++++++++++++++++++---------------------
1 file changed, 49 insertions(+), 40 deletions(-)
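
For reference, the calls below use the tlb_fill_align hook introduced
earlier in this series, which takes the MemOp in addition to the old
tlb_fill arguments. A rough sketch of what a target implementation
might look like follows; the name foo_cpu_tlb_fill_align and the
elided translation step are placeholders rather than code from this
series (the real hooks arrive with the hppa and arm patches later on):

    /* Hypothetical target hook; assumes QEMU's usual softmmu headers. */
    static bool foo_cpu_tlb_fill_align(CPUState *cs, vaddr addr, MemOp memop,
                                       int size, MMUAccessType access_type,
                                       int mmu_idx, bool probe, uintptr_t ra)
    {
        unsigned a_bits = memop_alignment_bits(memop);

        /*
         * The hook sees the MemOp, so the target decides where the
         * alignment fault sits relative to its translation faults.
         * The generic code below only checks alignment on the tlb-hit
         * path, where no hook was invoked.
         */
        if (a_bits && (addr & ((1 << a_bits) - 1))) {
            if (probe) {
                return false;
            }
            cpu_unaligned_access(cs, addr, access_type, mmu_idx, ra);
        }

        /* ... translate addr, then tlb_set_page_full(), or raise a fault ... */
        return true;
    }

With the target able to fault on misalignment during the fill, the
generic code only needs its own alignment check when the tlb entry
was already present.
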
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 4bc34c8a37..0e6ae65a39 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1616,14 +1616,14 @@ typedef struct MMULookupLocals {
* tlb_fill will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
uintptr_t index = tlb_index(cpu, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
- bool maybe_resized = false;
+ bool did_tlb_fill = false;
CPUTLBEntryFull *full;
int flags;
@@ -1631,17 +1631,26 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
- bool ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, data->size,
- access_type, mmu_idx,
- false, ra);
+ bool ok = cpu->cc->tcg_ops->tlb_fill_align(cpu, addr, memop,
+ data->size, access_type,
+ mmu_idx, false, ra);
assert(ok);
- maybe_resized = true;
+ did_tlb_fill = true;
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
+ if (!did_tlb_fill) {
+ /* We didn't use tlb_fill_align, so alignment not yet checked. */
+ unsigned a_bits = memop_alignment_bits(memop);
+
+ if (unlikely(addr & ((1 << a_bits) - 1))) {
+ cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
+ }
+ }
+
full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
@@ -1651,7 +1660,7 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
/* Compute haddr speculatively; depending on flags it might be invalid. */
data->haddr = (void *)((uintptr_t)addr + entry->addend);
- return maybe_resized;
+ return did_tlb_fill;
}
/**
@@ -1702,7 +1711,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
- unsigned a_bits;
bool crosspage;
int flags;
@@ -1711,12 +1719,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
- /* Handle CPU specific unaligned behaviour */
- a_bits = memop_alignment_bits(l->memop);
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
- }
-
l->page[0].addr = addr;
l->page[0].size = memop_size(l->memop);
l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
@@ -1724,7 +1726,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
if (likely(!crosspage)) {
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1743,8 +1745,8 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
- if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+ if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
@@ -1770,7 +1772,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* Device memory type require alignment.
*/
if (unlikely(flags & TLB_CHECK_ALIGNED)) {
- a_bits = memop_atomicity_bits(l->memop);
+ unsigned a_bits = memop_atomicity_bits(l->memop);
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
@@ -1788,34 +1790,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
- int a_bits = memop_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
vaddr tlb_addr;
void *hostaddr;
CPUTLBEntryFull *full;
+ bool did_tlb_fill = false;
tcg_debug_assert(mmu_idx < NB_MMU_MODES);
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
- /* Enforce guest required alignment. */
- if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
- /* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* Enforce qemu required alignment. */
- if (unlikely(addr & (size - 1))) {
- /* We get here if guest alignment was not requested,
- or was not enforced by cpu_unaligned_access above.
- We might widen the access and emulate, but for now
- mark an exception and exit the cpu loop. */
- goto stop_the_world;
- }
-
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
@@ -1824,10 +1810,11 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- bool ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
- MMU_DATA_STORE, mmu_idx,
- false, retaddr);
+ bool ok = cpu->cc->tcg_ops->tlb_fill_align(cpu, addr, mop, size,
+ MMU_DATA_STORE, mmu_idx,
+ false, retaddr);
assert(ok);
+ did_tlb_fill = true;
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
}
@@ -1841,8 +1828,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, MMU_DATA_LOAD,
- mmu_idx, false, retaddr);
+ cpu->cc->tcg_ops->tlb_fill_align(cpu, addr, mop, size, MMU_DATA_LOAD,
+ mmu_idx, false, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
@@ -1850,6 +1837,28 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
*/
g_assert_not_reached();
}
+
+ /* Enforce guest required alignment, if not handled by tlb_fill_align. */
+ if (!did_tlb_fill) {
+ int a_bits = memop_alignment_bits(mop);
+ if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
+ /* ??? Maybe indicate atomic op to cpu_unaligned_access */
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+ }
+
+ /* Enforce qemu required alignment. */
+ if (unlikely(addr & (size - 1))) {
+ /*
+ * We get here if guest alignment was not requested,
+ * or was not enforced by cpu_unaligned_access above.
+ * We might widen the access and emulate, but for now
+ * mark an exception and exit the cpu loop.
+ */
+ goto stop_the_world;
+ }
+
/* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read;
--
2.43.0