From: Alvise Rigo <a.rigo@virtualopensystems.com>
To: mttcg@listserver.greensocs.com, alex.bennee@linaro.org
Cc: qemu-devel@nongnu.org, jani.kokkonen@huawei.com,
	claudio.fontana@huawei.com, tech@virtualopensystems.com,
	fred.konrad@greensocs.com, pbonzini@redhat.com, rth@twiddle.net,
	serge.fdrv@gmail.com, cota@braap.org, peter.maydell@linaro.org,
	Alvise Rigo <a.rigo@virtualopensystems.com>
Subject: [Qemu-devel] [RFC 07/10] cputlb: Query tlb_flush_by_mmuidx
Date: Thu, 26 May 2016 18:35:46 +0200	[thread overview]
Message-ID: <20160526163549.3276-8-a.rigo@virtualopensystems.com> (raw)
In-Reply-To: <20160526163549.3276-1-a.rigo@virtualopensystems.com>

Some architectures need to flush the TLB by MMU index. As with
tlb_flush(), these flushes also have to be queued to the target VCPU
rather than performed directly. For the time being, this type of flush
is used only by the ARM/aarch64 target and is always triggered by the
emulation of guest instructions, so we can safely obtain the CPUState
of the current VCPU without relying on current_cpu. The downside is a
slightly more complex prototype, which gains an argument pointing to
the current VCPU's CPUState.
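
As an illustration of the resulting call convention (a minimal sketch,
not part of the diff below; the helper name is made up), the first
argument is the VCPU performing the emulation, the second is the VCPU
whose TLB must be flushed, and the list of MMU indexes is terminated
by -1:

    /* Hypothetical helper, for illustration only: flush two MMU indexes
     * on every VCPU from the VCPU emulating the TLB-invalidate
     * instruction.  When other_cs != this_cs, the flush is queued with
     * async_wait_run_on_cpu() and carried out by other_cs itself.
     */
    static void flush_s12_on_all_vcpus(CPUState *this_cs)
    {
        CPUState *other_cs;

        CPU_FOREACH(other_cs) {
            tlb_flush_by_mmuidx(this_cs, other_cs,
                                ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
        }
    }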

Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 cputlb.c                | 49 +++++++++++++++++++++++++++++++++++++++----------
 include/exec/exec-all.h |  4 ++--
 target-arm/helper.c     | 40 +++++++++++++++++++++-------------------
 3 files changed, 62 insertions(+), 31 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 5bbbf1b..73624d6 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -59,6 +59,8 @@
 /* We need a solution for stuffing 64 bit pointers in 32 bit ones if
  * we care about this combination */
 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(void *));
+/* Size, in bytes, of the bitmap used by tlb_flush_by_mmuidx functions */
+#define MMUIDX_BITMAP_SIZE (sizeof(unsigned long) * BITS_TO_LONGS(NB_MMU_MODES))
 
 /* statistics */
 int tlb_flush_count;
@@ -153,10 +155,41 @@ static inline void tlb_tables_flush_bitmap(CPUState *cpu, unsigned long *bitmap)
     memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
 }
 
-static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+struct TLBFlushByMMUIdxParams {
+    DECLARE_BITMAP(idx_to_flush, NB_MMU_MODES);
+};
+
+static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, void *opaque)
+{
+    struct TLBFlushByMMUIdxParams *params = opaque;
+
+    tlb_tables_flush_bitmap(cpu, params->idx_to_flush);
+
+    g_free(params);
+}
+
+static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target,
+                                         unsigned long *idxmap)
 {
+    if (!qemu_cpu_is_self(target)) {
+        struct TLBFlushByMMUIdxParams *params;
+
+        params = g_malloc(sizeof(struct TLBFlushByMMUIdxParams));
+        memcpy(params->idx_to_flush, idxmap, MMUIDX_BITMAP_SIZE);
+        async_wait_run_on_cpu(target, cpu, tlb_flush_by_mmuidx_async_work,
+                              params);
+    } else {
+        tlb_tables_flush_bitmap(cpu, idxmap);
+    }
+}
+
+void tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target_cpu, ...)
+{
+    va_list argp;
     DECLARE_BITMAP(idxmap, NB_MMU_MODES) = { 0 };
 
+    va_start(argp, target_cpu);
+
     for (;;) {
         int mmu_idx = va_arg(argp, int);
 
@@ -167,15 +200,9 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
         set_bit(mmu_idx, idxmap);
     }
 
-    tlb_tables_flush_bitmap(cpu, idxmap);
-}
-
-void tlb_flush_by_mmuidx(CPUState *cpu, ...)
-{
-    va_list argp;
-    va_start(argp, cpu);
-    v_tlb_flush_by_mmuidx(cpu, argp);
     va_end(argp);
+
+    v_tlb_flush_by_mmuidx(cpu, target_cpu, idxmap);
 }
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
@@ -244,7 +271,9 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   env->tlb_flush_addr, env->tlb_flush_mask);
 
-        v_tlb_flush_by_mmuidx(cpu, argp);
+        /* Temporarily use current_cpu until tlb_flush_page_by_mmuidx
+         * is reworked */
+        tlb_flush_by_mmuidx(current_cpu, cpu, argp);
         va_end(argp);
         return;
     }
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bc97683..066870b 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -152,7 +152,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
  * Flush all entries from the TLB of the specified CPU, for the specified
  * MMU indexes.
  */
-void tlb_flush_by_mmuidx(CPUState *cpu, ...);
+void tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target, ...);
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
@@ -205,7 +205,7 @@ static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
 {
 }
 
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target, ...)
 {
 }
 static inline void tlb_flush_page_all(target_ulong addr)
diff --git a/target-arm/helper.c b/target-arm/helper.c
index bc9fbda..3dcd910 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -2388,7 +2388,7 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
     if (raw_read(env, ri) != value) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
                             ARMMMUIdx_S2NS, -1);
         raw_write(env, ri, value);
     }
@@ -2748,9 +2748,9 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
 
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
     } else {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
     }
 }
 
@@ -2758,13 +2758,14 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
     bool sec = arm_is_secure_below_el3(env);
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
         if (sec) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1SE1,
+                                ARMMMUIdx_S1SE0, -1);
         } else {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S12NSE1,
                                 ARMMMUIdx_S12NSE0, -1);
         }
     }
@@ -2781,13 +2782,13 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
 
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
     } else {
         if (arm_feature(env, ARM_FEATURE_EL2)) {
-            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+            tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
                                 ARMMMUIdx_S2NS, -1);
         } else {
-            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+            tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
         }
     }
 }
@@ -2798,7 +2799,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+    tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1E2, -1);
 }
 
 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2807,7 +2808,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
+    tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1E3, -1);
 }
 
 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2819,16 +2820,17 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
      */
     bool sec = arm_is_secure_below_el3(env);
     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
         if (sec) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1SE1,
+                                ARMMMUIdx_S1SE0, -1);
         } else if (has_el2) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S12NSE1,
                                 ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
         } else {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S12NSE1,
                                 ARMMMUIdx_S12NSE0, -1);
         }
     }
@@ -2837,20 +2839,20 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
+        tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1E2, -1);
     }
 }
 
 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
+        tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1E3, -1);
     }
 }
 
-- 
2.8.3
