From: Alvise Rigo <a.rigo@virtualopensystems.com>
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: alex.bennee@linaro.org, jani.kokkonen@huawei.com,
	tech@virtualopensystems.com, claudio.fontana@huawei.com,
	pbonzini@redhat.com
Subject: [Qemu-devel] [RFC v5 1/6] exec.c: Add new exclusive bitmap to ram_list
Date: Thu, 24 Sep 2015 10:32:41 +0200
Message-ID: <1443083566-10994-2-git-send-email-a.rigo@virtualopensystems.com>
In-Reply-To: <1443083566-10994-1-git-send-email-a.rigo@virtualopensystems.com>

The purpose of this new bitmap is to flag, on a per-vCPU basis, the
memory pages that are in the middle of LL/SC operations (after an LL,
before an SC).
For all these pages, the corresponding TLB entries will be generated
in such a way as to force the slow path whenever at least one vCPU has
its bit cleared.
When the system starts, the whole memory is dirty (the whole bitmap is
set). A page, after being marked as exclusively-clean by an LL, is
restored to dirty after the SC.
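
As a rough sketch of the intended lifecycle (using the helpers
introduced below; the LL/SC hook points are only indicative, they
belong to later patches of this series):

    /* LL: mark the page exclusively-clean for this vCPU, so that
     * stores to it are forced through the slow path. */
    cpu_physical_memory_clear_excl_dirty(addr, cpu->cpu_index);

    /* The slow path can then check whether the page is still
     * exclusively-clean for at least one vCPU. */
    if (cpu_physical_memory_excl_atleast_one_clean(addr)) {
        /* ... handle the exclusive access ... */
    }

    /* SC: restore the page to dirty once the exclusive pair is done. */
    cpu_physical_memory_set_excl_dirty(addr, cpu->cpu_index);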

For each page we keep 8 bits, shared among all the vCPUs available in
the system. In general, bit n % 8 corresponds to vCPU n.
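
As a worked example (illustrative numbers only, assuming
TARGET_PAGE_BITS == 12, i.e. 4K target pages), for addr = 0x3000
(page 3) and vCPU 10:

    EXCL_BITMAP_GET_BYTE_OFFSET(0x3000) -> 3    /* cell (byte) index */
    EXCL_IDX(10)                        -> 2    /* 10 % 8 */
    EXCL_BITMAP_GET_BIT_OFFSET(0x3000)  -> 24   /* 8 * 3 */

so vCPU 10 is tracked by bit 24 + 2 = 26 of the global bitmap, a bit
it shares with vCPUs 2, 18, 26 and so on.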

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 exec.c                  |  8 ++++--
 include/exec/memory.h   |  3 +-
 include/exec/ram_addr.h | 75 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 83 insertions(+), 3 deletions(-)

diff --git a/exec.c b/exec.c
index 0a4a0c5..cbe559f 100644
--- a/exec.c
+++ b/exec.c
@@ -1496,11 +1496,15 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
         int i;
 
         /* ram_list.dirty_memory[] is protected by the iothread lock.  */
-        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+        for (i = 0; i < DIRTY_MEMORY_EXCLUSIVE; i++) {
             ram_list.dirty_memory[i] =
                 bitmap_zero_extend(ram_list.dirty_memory[i],
                                    old_ram_size, new_ram_size);
-       }
+        }
+        ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE] = bitmap_zero_extend(
+                ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE],
+                old_ram_size * EXCL_BITMAP_CELL_SZ,
+                new_ram_size * EXCL_BITMAP_CELL_SZ);
     }
     cpu_physical_memory_set_dirty_range(new_block->offset,
                                         new_block->used_length,
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 94d20ea..b71cb98 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -19,7 +19,8 @@
 #define DIRTY_MEMORY_VGA       0
 #define DIRTY_MEMORY_CODE      1
 #define DIRTY_MEMORY_MIGRATION 2
-#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */
+#define DIRTY_MEMORY_EXCLUSIVE 3
+#define DIRTY_MEMORY_NUM       4        /* num of dirty bits */
 
 #include <stdint.h>
 #include <stdbool.h>
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index c113f21..0016a4b 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -21,6 +21,7 @@
 
 #ifndef CONFIG_USER_ONLY
 #include "hw/xen/xen.h"
+#include "sysemu/sysemu.h"
 
 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                     bool share, const char *mem_path,
@@ -44,6 +45,13 @@ int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
 #define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
 #define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
 
+/* Exclusive bitmap support. */
+#define EXCL_BITMAP_CELL_SZ 8
+#define EXCL_BITMAP_GET_BIT_OFFSET(addr) \
+        (EXCL_BITMAP_CELL_SZ * ((addr) >> TARGET_PAGE_BITS))
+#define EXCL_BITMAP_GET_BYTE_OFFSET(addr) ((addr) >> TARGET_PAGE_BITS)
+#define EXCL_IDX(cpu) ((cpu) % EXCL_BITMAP_CELL_SZ)
+
 static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                  ram_addr_t length,
                                                  unsigned client)
@@ -135,6 +143,11 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
         bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
     }
+    if (unlikely(mask & (1 << DIRTY_MEMORY_EXCLUSIVE))) {
+        bitmap_set_atomic(d[DIRTY_MEMORY_EXCLUSIVE],
+                        page * EXCL_BITMAP_CELL_SZ,
+                        (end - page) * EXCL_BITMAP_CELL_SZ);
+    }
     xen_modified_memory(start, length);
 }
 
@@ -249,5 +262,67 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
     return num_dirty;
 }
 
+/* One cell for each page. The n-th bit of a cell describes all vCPUs i
+ * such that (i % EXCL_BITMAP_CELL_SZ) == n.
+ * A bit set to zero ensures that all the vCPUs described by that bit have
+ * the EXCL_BIT set for the page. */
+static inline void cpu_physical_memory_set_excl_dirty(ram_addr_t addr,
+                                                      uint32_t cpu)
+{
+    set_bit_atomic(EXCL_BITMAP_GET_BIT_OFFSET(addr) + EXCL_IDX(cpu),
+            ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE]);
+}
+
+static inline int cpu_physical_memory_excl_atleast_one_clean(ram_addr_t addr)
+{
+    uint8_t *bitmap;
+
+    bitmap = (uint8_t *)(ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE]);
+
+    /* This is safe even if smp_cpus < 8 since the unused bits are always 1. */
+    return bitmap[EXCL_BITMAP_GET_BYTE_OFFSET(addr)] != UCHAR_MAX;
+}
+
+/* Return true if @cpu has the bit set for the page containing @addr.
+ * If @cpu == smp_cpus, return true if at least one vCPU has the dirty bit
+ * set for that page. */
+static inline int cpu_physical_memory_excl_is_dirty(ram_addr_t addr,
+                                                    unsigned long cpu)
+{
+    uint8_t *bitmap;
+
+    bitmap = (uint8_t *)ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE];
+
+    if (cpu == smp_cpus) {
+        if (smp_cpus >= EXCL_BITMAP_CELL_SZ) {
+            return bitmap[EXCL_BITMAP_GET_BYTE_OFFSET(addr)];
+        } else {
+            return bitmap[EXCL_BITMAP_GET_BYTE_OFFSET(addr)] &
+                                            ((1 << smp_cpus) - 1);
+        }
+    } else {
+        return bitmap[EXCL_BITMAP_GET_BYTE_OFFSET(addr)] & (1 << EXCL_IDX(cpu));
+    }
+}
+
+/* Clear the dirty bit of @cpu. If @cpu == smp_cpus, clear the dirty bit
+ * of all the vCPUs. */
+static inline int cpu_physical_memory_clear_excl_dirty(ram_addr_t addr,
+                                                        uint32_t cpu)
+{
+    if (cpu == smp_cpus) {
+        int nr = (smp_cpus >= EXCL_BITMAP_CELL_SZ) ?
+                            EXCL_BITMAP_CELL_SZ : smp_cpus;
+
+        return bitmap_test_and_clear_atomic(
+                        ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE],
+                        EXCL_BITMAP_GET_BIT_OFFSET(addr), nr);
+    } else {
+        return bitmap_test_and_clear_atomic(
+                        ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE],
+                        EXCL_BITMAP_GET_BIT_OFFSET(addr) + EXCL_IDX(cpu), 1);
+    }
+}
+
 #endif
 #endif
-- 
2.5.3
