From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from eggs.gnu.org ([2001:4830:134:3::10]:45548) by lists.gnu.org
 with esmtp (Exim 4.71) (envelope-from ) id 1ZDTZC-000549-5H
 for qemu-devel@nongnu.org; Fri, 10 Jul 2015 04:21:57 -0400
Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)
 (envelope-from ) id 1ZDTZ7-0001VQ-UE
 for qemu-devel@nongnu.org; Fri, 10 Jul 2015 04:21:53 -0400
Received: from mail-wi0-f181.google.com ([209.85.212.181]:38520)
 by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1ZDTZ7-0001VH-LZ
 for qemu-devel@nongnu.org; Fri, 10 Jul 2015 04:21:49 -0400
Received: by wicmv11 with SMTP id mv11so7795350wic.1 for ;
 Fri, 10 Jul 2015 01:21:49 -0700 (PDT)
From: Alvise Rigo
Date: Fri, 10 Jul 2015 10:23:46 +0200
Message-Id: <1436516626-8322-14-git-send-email-a.rigo@virtualopensystems.com>
In-Reply-To: <1436516626-8322-1-git-send-email-a.rigo@virtualopensystems.com>
References: <1436516626-8322-1-git-send-email-a.rigo@virtualopensystems.com>
Subject: [Qemu-devel] [RFC v3 13/13] softmmu_template.h: move to multithreading
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: alex.bennee@linaro.org, jani.kokkonen@huawei.com,
 tech@virtualopensystems.com, claudio.fontana@huawei.com, pbonzini@redhat.com

Exploiting tcg_excl_access_lock, port helper_{le,be}_st_name to work in
real multithreading.

- The macro lookup_cpus_ll_addr now directly resets the
  env->excl_protected_hwaddr of the other vCPUs to invalidate their
  LL/SC operations.

Suggested-by: Jani Kokkonen
Suggested-by: Claudio Fontana
Signed-off-by: Alvise Rigo
---
 softmmu_template.h | 110 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 89 insertions(+), 21 deletions(-)

diff --git a/softmmu_template.h b/softmmu_template.h
index bc767f6..522454f 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -141,21 +141,24 @@
         vidx >= 0;                                                           \
     })
 
+#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+
+/* This macro requires the caller to hold tcg_excl_access_lock, since
+ * it modifies the excl_protected_hwaddr of a running vCPU.
+ * It scans the excl_protected_hwaddr of all the vCPUs and compares
+ * them with the address the current vCPU is writing to. If there is a
+ * match, we reset the value, making the SC fail. */
 #define lookup_cpus_ll_addr(addr)                                            \
 ({                                                                           \
     CPUState *cpu;                                                           \
     CPUArchState *acpu;                                                      \
-    bool hit = false;                                                        \
                                                                              \
     CPU_FOREACH(cpu) {                                                       \
         acpu = (CPUArchState *)cpu->env_ptr;                                 \
         if (cpu != current_cpu && acpu->excl_protected_hwaddr == addr) {     \
-            hit = true;                                                      \
-            break;                                                           \
+            acpu->excl_protected_hwaddr = EXCLUSIVE_RESET_ADDR;              \
         }                                                                    \
     }                                                                        \
-                                                                             \
-    hit;                                                                     \
 })
 
 #ifndef SOFTMMU_CODE_ACCESS
@@ -439,18 +442,52 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
              * exclusive-protected memory. */
             hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
-            bool set_to_dirty;
-
             /* Two cases of invalidation: the current vCPU is writing to another
              * vCPU's exclusive address or the vCPU that issued the LoadLink is
              * writing to it, but not through a StoreCond. */
-            set_to_dirty = lookup_cpus_ll_addr(hw_addr);
-            set_to_dirty |= env->ll_sc_context &&
-                            (env->excl_protected_hwaddr == hw_addr);
+            qemu_mutex_lock(&tcg_excl_access_lock);
+
+            /* A call to lookup_cpus_ll_addr by another vCPU could have
+             * reset our exclusive address. Fail the SC in this case.
+             * N.B.: here excl_succeeded == 0 means that we don't come
+             * from a store conditional. */
+            if (env->excl_succeeded &&
+                (env->excl_protected_hwaddr == EXCLUSIVE_RESET_ADDR)) {
+                env->excl_succeeded = 0;
+                qemu_mutex_unlock(&tcg_excl_access_lock);
+
+                return;
+            }
+
+            lookup_cpus_ll_addr(hw_addr);
+
+            if (!env->excl_succeeded) {
+                if (env->ll_sc_context &&
+                    (env->excl_protected_hwaddr == hw_addr)) {
+                    cpu_physical_memory_set_excl_dirty(hw_addr);
+                }
+            } else {
+                if (cpu_physical_memory_excl_is_dirty(hw_addr) ||
+                    env->excl_protected_hwaddr != hw_addr) {
+                    env->excl_protected_hwaddr = EXCLUSIVE_RESET_ADDR;
+                    qemu_mutex_unlock(&tcg_excl_access_lock);
+                    env->excl_succeeded = 0;
+
+                    return;
+                }
+            }
+
+            haddr = addr + env->tlb_table[mmu_idx][index].addend;
+        #if DATA_SIZE == 1
+            glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+        #else
+            glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
+        #endif
+
+            env->excl_protected_hwaddr = EXCLUSIVE_RESET_ADDR;
+            qemu_mutex_unlock(&tcg_excl_access_lock);
 
-            if (set_to_dirty) {
-                cpu_physical_memory_set_excl_dirty(hw_addr);
-            } /* the vCPU is legitimately writing to the protected address */
+            return;
         } else {
             if ((addr & (DATA_SIZE - 1)) != 0) {
                 goto do_unaligned_access;
@@ -537,18 +574,49 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
              * exclusive-protected memory. */
             hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
-            bool set_to_dirty;
-
             /* Two cases of invalidation: the current vCPU is writing to another
              * vCPU's exclusive address or the vCPU that issued the LoadLink is
              * writing to it, but not through a StoreCond. */
-            set_to_dirty = lookup_cpus_ll_addr(hw_addr);
-            set_to_dirty |= env->ll_sc_context &&
-                            (env->excl_protected_hwaddr == hw_addr);
+            qemu_mutex_lock(&tcg_excl_access_lock);
+
+            /* A call to lookup_cpus_ll_addr by another vCPU could have
+             * reset our exclusive address. Fail the SC in this case.
+             * N.B.: here excl_succeeded == 0 means that we don't come
+             * from a store conditional. */
+            if (env->excl_succeeded &&
+                (env->excl_protected_hwaddr == EXCLUSIVE_RESET_ADDR)) {
+                env->excl_succeeded = 0;
+                qemu_mutex_unlock(&tcg_excl_access_lock);
+
+                return;
+            }
+
+            lookup_cpus_ll_addr(hw_addr);
+
+            if (!env->excl_succeeded) {
+                if (env->ll_sc_context &&
+                    (env->excl_protected_hwaddr == hw_addr)) {
+                    cpu_physical_memory_set_excl_dirty(hw_addr);
+                }
+            } else {
+                if (cpu_physical_memory_excl_is_dirty(hw_addr) ||
+                    env->excl_protected_hwaddr != hw_addr) {
+                    env->excl_protected_hwaddr = EXCLUSIVE_RESET_ADDR;
+                    qemu_mutex_unlock(&tcg_excl_access_lock);
+                    env->excl_succeeded = 0;
+
+                    return;
+                }
+            }
 
-            if (set_to_dirty) {
-                cpu_physical_memory_set_excl_dirty(hw_addr);
-            } /* the vCPU is legitimately writing to the protected address */
+            haddr = addr + env->tlb_table[mmu_idx][index].addend;
+
+            glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+
+            env->excl_protected_hwaddr = EXCLUSIVE_RESET_ADDR;
+            qemu_mutex_unlock(&tcg_excl_access_lock);
+
+            return;
         } else {
             if ((addr & (DATA_SIZE - 1)) != 0) {
                 goto do_unaligned_access;
-- 
2.4.5
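
For readers who want to experiment with the protocol outside of QEMU, below
is a minimal, self-contained C sketch of the same LL/SC invalidation scheme
the store helpers above implement: each vCPU records one protected address,
any store resets matching protections held by the other vCPUs, and a store
conditional succeeds only if its own protection survived. All names in the
sketch (toy_cpu, invalidate_others, store_cond, RESET_ADDR) are invented for
illustration and are not QEMU APIs; the single pthread mutex stands in for
tcg_excl_access_lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESET_ADDR UINT64_MAX   /* plays the role of EXCLUSIVE_RESET_ADDR */
#define N_CPUS 2

typedef struct {
    uint64_t excl_protected;    /* address covered by an outstanding LL */
} toy_cpu;

static toy_cpu cpus[N_CPUS];
static pthread_mutex_t excl_lock = PTHREAD_MUTEX_INITIALIZER;

/* LoadLink: record the address this vCPU wants exclusive access to. */
static void load_link(int cpu, uint64_t addr)
{
    pthread_mutex_lock(&excl_lock);
    cpus[cpu].excl_protected = addr;
    pthread_mutex_unlock(&excl_lock);
}

/* Any store: reset every other vCPU's protection of this address, like
 * lookup_cpus_ll_addr() in the patch. The caller must hold excl_lock. */
static void invalidate_others(int cpu, uint64_t addr)
{
    for (int i = 0; i < N_CPUS; i++) {
        if (i != cpu && cpus[i].excl_protected == addr) {
            cpus[i].excl_protected = RESET_ADDR;
        }
    }
}

/* StoreConditional: succeeds only if our own protection is still intact;
 * either way the protection is consumed, as in the helpers above. */
static bool store_cond(int cpu, uint64_t addr)
{
    bool ok;

    pthread_mutex_lock(&excl_lock);
    invalidate_others(cpu, addr);
    ok = (cpus[cpu].excl_protected == addr);
    cpus[cpu].excl_protected = RESET_ADDR;
    pthread_mutex_unlock(&excl_lock);
    return ok;
}

int main(void)
{
    for (int i = 0; i < N_CPUS; i++) {
        cpus[i].excl_protected = RESET_ADDR;
    }

    load_link(0, 0x1000);
    load_link(1, 0x1000);   /* CPU 1 links the same address */
    printf("CPU1 SC: %d\n", store_cond(1, 0x1000)); /* 1: succeeds, kills CPU0's link */
    printf("CPU0 SC: %d\n", store_cond(0, 0x1000)); /* 0: fails, was invalidated */
    return 0;
}

The demo runs single-threaded for determinism, but the key design point it
mirrors is the one the patch relies on: a successful SC both verifies and
consumes the protection while holding the lock, so the check and the
invalidation of other vCPUs are atomic with respect to each other.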