From: Alvise Rigo <a.rigo@virtualopensystems.com>
To: mttcg@listserver.greensocs.com, alex.bennee@linaro.org
Cc: qemu-devel@nongnu.org, jani.kokkonen@huawei.com,
claudio.fontana@huawei.com, tech@virtualopensystems.com,
fred.konrad@greensocs.com, pbonzini@redhat.com, rth@twiddle.net,
serge.fdrv@gmail.com, cota@braap.org, peter.maydell@linaro.org,
Alvise Rigo <a.rigo@virtualopensystems.com>
Subject: [Qemu-devel] [RFC 08/10] cputlb: Query tlb_flush_page_by_mmuidx
Date: Thu, 26 May 2016 18:35:47 +0200
Message-ID: <20160526163549.3276-9-a.rigo@virtualopensystems.com>
In-Reply-To: <20160526163549.3276-1-a.rigo@virtualopensystems.com>
Similarly to the previous commit, make tlb_flush_page_by_mmuidx query the
flushes, i.e. defer them as asynchronous work items, when they target a
VCPU other than the calling one.
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
cputlb.c | 90 ++++++++++++++++++++++++++++++++++---------------
include/exec/exec-all.h | 5 +--
target-arm/helper.c | 35 ++++++++++---------
3 files changed, 85 insertions(+), 45 deletions(-)
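
[Editor's note, not part of the patch: the control flow introduced here
can be condensed into a toy program. A flush that targets another VCPU is
packaged into a heap-allocated parameter block and queued on that VCPU, to
be run later from its own thread, while a flush that targets the calling
VCPU still runs synchronously. This is a minimal sketch assuming a
single-threaded driver loop; names such as VCPU, queue_work and
flush_page_dispatch are invented for the example and are not QEMU API.]

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NB_MMU_MODES 8

typedef struct VCPU VCPU;

/* A deferred work item, queued on the target VCPU (a stand-in for
 * the queue behind async_wait_run_on_cpu()). */
typedef struct WorkItem {
    void (*fn)(VCPU *cpu, void *opaque);
    void *opaque;
    struct WorkItem *next;
} WorkItem;

struct VCPU {
    int index;
    bool is_current;   /* stands in for qemu_cpu_is_self() */
    WorkItem *work;    /* pending work, drained by the VCPU thread */
};

/* Parameters travelling with the queued flush, in the spirit of
 * struct TLBFlushByMMUIdxParams. */
typedef struct FlushParams {
    unsigned long idx_to_flush;   /* one bit per MMU index */
    uint64_t addr;
} FlushParams;

static void flush_page_now(VCPU *cpu, uint64_t addr, unsigned long idxmap)
{
    for (int mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (idxmap & (1UL << mmu_idx)) {
            printf("vcpu%d: flush page 0x%" PRIx64 ", mmu_idx %d\n",
                   cpu->index, addr, mmu_idx);
        }
    }
}

/* Runs on the target VCPU's thread and frees its own parameters,
 * mirroring tlb_flush_page_by_mmuidx_async_work(). */
static void flush_page_work(VCPU *cpu, void *opaque)
{
    FlushParams *p = opaque;
    flush_page_now(cpu, p->addr, p->idx_to_flush);
    free(p);
}

static void queue_work(VCPU *target, void (*fn)(VCPU *, void *), void *opaque)
{
    WorkItem *wi = malloc(sizeof(*wi));
    wi->fn = fn;
    wi->opaque = opaque;
    wi->next = target->work;
    target->work = wi;
}

/* The dispatch decision at the heart of the patch.  The real code
 * additionally falls back to a full flush when @addr hits the cached
 * large-page range. */
static void flush_page_dispatch(VCPU *target, uint64_t addr,
                                unsigned long idxmap)
{
    if (!target->is_current) {
        FlushParams *p = malloc(sizeof(*p));
        p->addr = addr;
        p->idx_to_flush = idxmap;
        queue_work(target, flush_page_work, p);
    } else {
        flush_page_now(target, addr, idxmap);
    }
}

int main(void)
{
    VCPU self  = { .index = 0, .is_current = true,  .work = NULL };
    VCPU other = { .index = 1, .is_current = false, .work = NULL };

    flush_page_dispatch(&self, 0x1000, 0x3);   /* executes immediately */
    flush_page_dispatch(&other, 0x2000, 0x1);  /* gets queued */

    /* Later, "vcpu1" drains its queue from its own thread. */
    while (other.work) {
        WorkItem *wi = other.work;
        other.work = wi->next;
        wi->fn(&other, wi->opaque);
        free(wi);
    }
    return 0;
}

[The patch below keeps the same shape: qemu_cpu_is_self() picks the
branch, async_wait_run_on_cpu() plays the role of queue_work(), and the
work function frees its TLBFlushByMMUIdxParams once the flush is done.]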
diff --git a/cputlb.c b/cputlb.c
index 73624d6..77a1997 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -157,6 +157,8 @@ static inline void tlb_tables_flush_bitmap(CPUState *cpu, unsigned long *bitmap)
struct TLBFlushByMMUIdxParams {
DECLARE_BITMAP(idx_to_flush, NB_MMU_MODES);
+ /* Used by tlb_flush_page_by_mmuidx */
+ target_ulong addr;
};
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, void *opaque)
@@ -255,28 +257,13 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
tb_flush_jmp_cache(cpu, addr);
}
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
+static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu, void *opaque)
{
CPUArchState *env = cpu->env_ptr;
- int i, k;
- va_list argp;
-
- va_start(argp, addr);
-
- tlb_debug("addr "TARGET_FMT_lx"\n", addr);
-
- /* Check if we need to flush due to large pages. */
- if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
- tlb_debug("forced full flush ("
- TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
- env->tlb_flush_addr, env->tlb_flush_mask);
+ struct TLBFlushByMMUIdxParams *params = opaque;
+ target_ulong addr = params->addr;
+ int mmu_idx, i;
- /* Temporarily use current_cpu until tlb_flush_page_by_mmuidx
- * is reworked */
- tlb_flush_by_mmuidx(current_cpu, cpu, argp);
- va_end(argp);
- return;
- }
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
@@ -284,6 +271,49 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ if (test_bit(mmu_idx, params->idx_to_flush)) {
+ int k;
+
+ tlb_debug("idx %d\n", mmu_idx);
+ tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+ /* check whether there are vtlb entries that need to be flushed */
+ for (k = 0; k < CPU_VTLB_SIZE; k++) {
+ tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
+ }
+ }
+ }
+
+ tb_flush_jmp_cache(cpu, addr);
+
+ g_free(params);
+}
+
+static void v_tlb_flush_page_by_mmuidx(CPUState *cpu, CPUState *target_cpu,
+ target_ulong addr, unsigned long *idxmap)
+{
+ if (!qemu_cpu_is_self(target_cpu)) {
+ struct TLBFlushByMMUIdxParams *params;
+
+ params = g_malloc(sizeof(struct TLBFlushByMMUIdxParams));
+ params->addr = addr;
+ memcpy(params->idx_to_flush, idxmap, MMUIDX_BITMAP_SIZE);
+ async_wait_run_on_cpu(target_cpu, cpu,
+ tlb_flush_page_by_mmuidx_async_work, params);
+ } else {
+ tlb_tables_flush_bitmap(cpu, idxmap);
+ }
+}
+
+void tlb_flush_page_by_mmuidx(CPUState *cpu, CPUState *target,
+ target_ulong addr, ...)
+{
+ DECLARE_BITMAP(idxmap, NB_MMU_MODES) = { 0 };
+ CPUArchState *env = target->env_ptr;
+ va_list argp;
+
+ va_start(argp, addr);
+
for (;;) {
int mmu_idx = va_arg(argp, int);
@@ -291,18 +321,24 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
break;
}
- tlb_debug("idx %d\n", mmu_idx);
+ set_bit(mmu_idx, idxmap);
+ }
- tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+ va_end(argp);
- /* check whether there are vltb entries that need to be flushed */
- for (k = 0; k < CPU_VTLB_SIZE; k++) {
- tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
- }
+ tlb_debug("addr "TARGET_FMT_lx"\n", addr);
+
+ /* Check if we need to flush due to large pages. */
+ if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+ tlb_debug("forced full flush ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ env->tlb_flush_addr, env->tlb_flush_mask);
+
+ v_tlb_flush_by_mmuidx(cpu, target, idxmap);
+ return;
}
- va_end(argp);
- tb_flush_jmp_cache(cpu, addr);
+ v_tlb_flush_page_by_mmuidx(cpu, target, addr, idxmap);
}
static void tlb_flush_page_async_work(CPUState *cpu, void *opaque)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 066870b..cb891d2 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -143,7 +143,8 @@ void tlb_flush(CPUState *cpu, int flush_global);
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
+void tlb_flush_page_by_mmuidx(CPUState *cpu, CPUState *target,
+ target_ulong addr, ...);
/**
* tlb_flush_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
@@ -200,7 +201,7 @@ static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
-static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu, CPUState *target,
target_ulong addr, ...)
{
}
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 3dcd910..0187c0a 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -2869,10 +2869,10 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = sextract64(value << 12, 0, 56);
if (arm_is_secure_below_el3(env)) {
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
+ tlb_flush_page_by_mmuidx(cs, cs, pageaddr, ARMMMUIdx_S1SE1,
ARMMMUIdx_S1SE0, -1);
} else {
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
+ tlb_flush_page_by_mmuidx(cs, cs, pageaddr, ARMMMUIdx_S12NSE1,
ARMMMUIdx_S12NSE0, -1);
}
}
@@ -2888,7 +2888,7 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
+ tlb_flush_page_by_mmuidx(cs, cs, pageaddr, ARMMMUIdx_S1E2, -1);
}
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2902,23 +2902,23 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
+ tlb_flush_page_by_mmuidx(cs, cs, pageaddr, ARMMMUIdx_S1E3, -1);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
bool sec = arm_is_secure_below_el3(env);
- CPUState *other_cs;
+ CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
if (sec) {
- tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
- ARMMMUIdx_S1SE0, -1);
+ tlb_flush_page_by_mmuidx(this_cs, other_cs, pageaddr,
+ ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
} else {
- tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
- ARMMMUIdx_S12NSE0, -1);
+ tlb_flush_page_by_mmuidx(this_cs, other_cs, pageaddr,
+ ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
}
}
}
@@ -2926,22 +2926,24 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- CPUState *other_cs;
+ CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
- tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
+ tlb_flush_page_by_mmuidx(this_cs, other_cs, pageaddr,
+ ARMMMUIdx_S1E2, -1);
}
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- CPUState *other_cs;
+ CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
- tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
+ tlb_flush_page_by_mmuidx(this_cs, other_cs, pageaddr,
+ ARMMMUIdx_S1E3, -1);
}
}
@@ -2964,13 +2966,13 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
+ tlb_flush_page_by_mmuidx(cs, cs, pageaddr, ARMMMUIdx_S2NS, -1);
}
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- CPUState *other_cs;
+ CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
uint64_t pageaddr;
if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
@@ -2980,7 +2982,8 @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
CPU_FOREACH(other_cs) {
- tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
+ tlb_flush_page_by_mmuidx(this_cs, other_cs, pageaddr,
+ ARMMMUIdx_S2NS, -1);
}
}
--
2.8.3