From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PULL 31/39] accel/tcg: Merge io_writex into do_st_mmio_leN
Date: Fri, 15 Sep 2023 20:30:03 -0700
Message-ID: <20230916033011.479144-32-richard.henderson@linaro.org>
In-Reply-To: <20230916033011.479144-1-richard.henderson@linaro.org>

Avoid multiple calls to io_prepare for unaligned accesses.
One call to do_st_mmio_leN will never cross pages.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 82 +++++++++++++++++-----------------------------
 1 file changed, 30 insertions(+), 52 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 6cf69bd79d..1d56e3ec0c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1388,30 +1388,6 @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
     }
 }
 
-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
-                      int mmu_idx, uint64_t val, vaddr addr,
-                      uintptr_t retaddr, MemOp op)
-{
-    MemoryRegionSection *section;
-    hwaddr mr_offset;
-    MemoryRegion *mr;
-    MemTxResult r;
-
-    section = io_prepare(&mr_offset, env, full->xlat_section,
-                         full->attrs, addr, retaddr);
-    mr = section->mr;
-
-    {
-        QEMU_IOTHREAD_LOCK_GUARD();
-        r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
-    }
-
-    if (r != MEMTX_OK) {
-        io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
-                  r, retaddr);
-    }
-}
-
 /* Return true if ADDR is present in the victim tlb, and has been copied
    back to the main tlb.  */
 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
@@ -2682,39 +2658,41 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
                                uint64_t val_le, vaddr addr, int size,
                                int mmu_idx, uintptr_t ra)
 {
+    MemoryRegionSection *section;
+    hwaddr mr_offset;
+    MemoryRegion *mr;
+    MemTxAttrs attrs;
+
     tcg_debug_assert(size > 0 && size <= 8);
 
+    attrs = full->attrs;
+    section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+    mr = section->mr;
+
     do {
+        MemOp this_mop;
+        unsigned this_size;
+        MemTxResult r;
+
         /* Store aligned pieces up to 8 bytes. */
-        switch ((size | (int)addr) & 7) {
-        case 1:
-        case 3:
-        case 5:
-        case 7:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
-            val_le >>= 8;
-            size -= 1;
-            addr += 1;
-            break;
-        case 2:
-        case 6:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
-            val_le >>= 16;
-            size -= 2;
-            addr += 2;
-            break;
-        case 4:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
-            val_le >>= 32;
-            size -= 4;
-            addr += 4;
-            break;
-        case 0:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
-            return 0;
-        default:
-            qemu_build_not_reached();
+        this_mop = ctz32(size | (int)addr | 8);
+        this_size = 1 << this_mop;
+        this_mop |= MO_LE;
+
+        r = memory_region_dispatch_write(mr, mr_offset, val_le,
+                                         this_mop, attrs);
+        if (unlikely(r != MEMTX_OK)) {
+            io_failed(env, full, addr, this_size, MMU_DATA_STORE,
+                      mmu_idx, r, ra);
         }
+        if (this_size == 8) {
+            return 0;
+        }
+
+        val_le >>= this_size * 8;
+        addr += this_size;
+        mr_offset += this_size;
+        size -= this_size;
     } while (size);
 
     return val_le;
-- 
2.34.1
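
A minimal standalone sketch (not part of the patch) of the chunk-selection
arithmetic the new do_st_mmio_leN loop relies on: plain C, with __builtin_ctz
assumed as a stand-in for QEMU's ctz32(), and a hypothetical helper
show_store_chunks() using made-up sample addresses.  Each iteration picks the
largest power-of-two chunk, capped at 8 bytes, that both fits the remaining
size and is aligned for the current address.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void show_store_chunks(uint64_t addr, int size)
{
    printf("addr=0x%" PRIx64 " size=%d:", addr, size);
    do {
        /*
         * As in ctz32(size | (int)addr | 8) above: the lowest set bit
         * bounds both the remaining length and the address alignment,
         * and the literal 8 caps the chunk at 8 bytes.  The result is
         * also the MemOp size index (0..3), into which the patch ORs
         * MO_LE before dispatching.
         */
        int this_mop = __builtin_ctz((unsigned)(size | (int)addr | 8));
        int this_size = 1 << this_mop;

        printf(" %d", this_size);
        addr += this_size;
        size -= this_size;
    } while (size);
    printf("\n");
}

int main(void)
{
    show_store_chunks(0x1001, 7);  /* unaligned: 1 + 2 + 4 byte stores */
    show_store_chunks(0x1000, 8);  /* aligned:   a single 8-byte store */
    show_store_chunks(0x1002, 6);  /* unaligned: 2 + 4 byte stores     */
    return 0;
}

With io_prepare hoisted out of the loop, every chunk reuses the same
MemoryRegion and advances mr_offset alongside addr, which is what removes
the per-chunk io_prepare/io_writex round trips for unaligned accesses.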



Thread overview: 45+ messages
2023-09-16  3:29 [PULL 00/39] tcg patch queue Richard Henderson
2023-09-16  3:29 ` [PULL 01/39] accel/tcg: mttcg remove false-negative halted assertion Richard Henderson
2023-09-18  6:44   ` Nicholas Piggin
2023-09-18  7:59     ` Alex Bennée
2023-09-18 10:53       ` Nicholas Piggin
2023-09-18 12:19         ` Alex Bennée
2023-09-16  3:29 ` [PULL 02/39] accel/tcg: Fix the comment for CPUTLBEntryFull Richard Henderson
2023-09-16  3:29 ` [PULL 03/39] util: Delete checks for old host definitions Richard Henderson
2023-09-16  3:29 ` [PULL 04/39] softmmu: " Richard Henderson
2023-09-16  3:29 ` [PULL 05/39] thunk: " Richard Henderson
2023-09-16  3:29 ` [PULL 06/39] tcg/loongarch64: Import LSX instructions Richard Henderson
2023-09-16  3:29 ` [PULL 07/39] tcg/loongarch64: Lower basic tcg vec ops to LSX Richard Henderson
2023-09-16  3:29 ` [PULL 08/39] tcg: pass vece to tcg_target_const_match() Richard Henderson
2023-09-16  3:29 ` [PULL 09/39] tcg/loongarch64: Lower cmp_vec to vseq/vsle/vslt Richard Henderson
2023-09-16  3:29 ` [PULL 10/39] tcg/loongarch64: Lower add/sub_vec to vadd/vsub Richard Henderson
2023-09-16  3:29 ` [PULL 11/39] tcg/loongarch64: Lower vector bitwise operations Richard Henderson
2023-09-16  3:29 ` [PULL 12/39] tcg/loongarch64: Lower neg_vec to vneg Richard Henderson
2023-09-16  3:29 ` [PULL 13/39] tcg/loongarch64: Lower mul_vec to vmul Richard Henderson
2023-09-16  3:29 ` [PULL 14/39] tcg/loongarch64: Lower vector min max ops Richard Henderson
2023-09-16  3:29 ` [PULL 15/39] tcg/loongarch64: Lower vector saturated ops Richard Henderson
2023-09-16  3:29 ` [PULL 16/39] tcg/loongarch64: Lower vector shift vector ops Richard Henderson
2023-09-16  3:29 ` [PULL 17/39] tcg/loongarch64: Lower bitsel_vec to vbitsel Richard Henderson
2023-09-16  3:29 ` [PULL 18/39] tcg/loongarch64: Lower vector shift integer ops Richard Henderson
2023-09-16  3:29 ` [PULL 19/39] tcg/loongarch64: Lower rotv_vec ops to LSX Richard Henderson
2023-09-16  3:29 ` [PULL 20/39] tcg/loongarch64: Lower rotli_vec to vrotri Richard Henderson
2023-09-16  3:29 ` [PULL 21/39] tcg/loongarch64: Implement 128-bit load & store Richard Henderson
2023-09-16  3:29 ` [PULL 22/39] tcg: Add gvec compare with immediate and scalar operand Richard Henderson
2023-09-16  3:29 ` [PULL 23/39] target/arm: Use tcg_gen_gvec_cmpi for compare vs 0 Richard Henderson
2023-09-16  3:29 ` [PULL 24/39] accel/tcg: Simplify tlb_plugin_lookup Richard Henderson
2023-09-16  3:29 ` [PULL 25/39] accel/tcg: Split out io_prepare and io_failed Richard Henderson
2023-09-16  3:29 ` [PULL 26/39] accel/tcg: Use CPUTLBEntryFull.phys_addr in io_failed Richard Henderson
2023-09-16  3:29 ` [PULL 27/39] plugin: Simplify struct qemu_plugin_hwaddr Richard Henderson
2023-09-16  3:30 ` [PULL 28/39] accel/tcg: Merge cpu_transaction_failed into io_failed Richard Henderson
2023-09-16  3:30 ` [PULL 29/39] accel/tcg: Replace direct use of io_readx/io_writex in do_{ld, st}_1 Richard Henderson
2023-09-16  3:30 ` [PULL 30/39] accel/tcg: Merge io_readx into do_ld_mmio_beN Richard Henderson
2023-09-16  3:30 ` [PULL 31/39] accel/tcg: Merge io_writex into do_st_mmio_leN Richard Henderson [this message]
2023-09-16  3:30 ` [PULL 32/39] accel/tcg: Introduce do_ld16_mmio_beN Richard Henderson
2023-09-16  3:30 ` [PULL 33/39] accel/tcg: Introduce do_st16_mmio_leN Richard Henderson
2023-09-16  3:30 ` [PULL 34/39] fpu: Add conversions between bfloat16 and [u]int8 Richard Henderson
2023-09-16  3:30 ` [PULL 35/39] fpu: Handle m68k extended precision denormals properly Richard Henderson
2023-09-16  3:30 ` [PULL 36/39] tcg: Add tcg_out_tb_start backend hook Richard Henderson
2023-09-16  3:30 ` [PULL 37/39] util/cpuinfo-aarch64: Add CPUINFO_BTI Richard Henderson
2023-09-16  3:30 ` [PULL 38/39] tcg/aarch64: Emit BTI insns at jump landing pads Richard Henderson
2023-09-16  3:30 ` [PULL 39/39] tcg: Map code_gen_buffer with PROT_BTI Richard Henderson
2023-09-16  4:07 ` [PULL 00/39] tcg patch queue Richard Henderson
