qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Alvise Rigo <a.rigo@virtualopensystems.com>
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: claudio.fontana@huawei.com, pbonzini@redhat.com,
	jani.kokkonen@huawei.com, tech@virtualopensystems.com,
	alex.bennee@linaro.org, aurelien@aurel32.net
Subject: [Qemu-devel] [mttcg RFC v4 3/6] exec: ram_addr: Fix exclusive bitmap accessor
Date: Fri, 14 Aug 2015 17:55:29 +0200	[thread overview]
Message-ID: <1439567732-14118-4-git-send-email-a.rigo@virtualopensystems.com> (raw)
In-Reply-To: <1439567732-14118-1-git-send-email-a.rigo@virtualopensystems.com>

Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 include/exec/ram_addr.h | 61 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 51 insertions(+), 10 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 6b678d6..34bb486 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -269,11 +269,28 @@ static inline int cpu_physical_memory_excl_atleast_one_clean(ram_addr_t addr)
     unsigned long next, end;
 
     if (likely(smp_cpus <= BITS_PER_LONG)) {
-        unsigned long mask = (1 << smp_cpus) - 1;
-
-        return
-            (mask & (bitmap[BIT_WORD(EXCL_BITMAP_GET_OFFSET(addr))] >>
-            (EXCL_BITMAP_GET_OFFSET(addr) & (BITS_PER_LONG-1)))) != mask;
+        unsigned long mask1;
+        uint32_t shift, first_off;
+        /* Number of vCPUs bits in the next long. */
+        int bits_left;
+
+        first_off = BIT_WORD(EXCL_BITMAP_GET_OFFSET(addr));
+        shift = (EXCL_BITMAP_GET_OFFSET(addr) & (BITS_PER_LONG-1));
+        bits_left = (shift + smp_cpus) - BITS_PER_LONG;
+
+        if (bits_left <= 0) {
+            mask1 = (1 << smp_cpus) - 1;
+            return (mask1 & (bitmap[first_off] >> shift)) != mask1;
+        } else {
+            /* The bits we need to access span two different longs. */
+            unsigned long mask2;
+
+            mask2 = (1 << bits_left) - 1;
+            mask1 = (1 << (smp_cpus - bits_left)) - 1;
+
+            return !(((mask2 & bitmap[first_off + 1]) == mask2) &&
+                   ((mask1 & (bitmap[first_off] >> shift)) == mask1));
+        }
     }
 
     end = BIT_WORD(EXCL_BITMAP_GET_OFFSET(addr)) + smp_cpus;
@@ -288,15 +305,41 @@ static inline int cpu_physical_memory_excl_is_dirty(ram_addr_t addr,
 {
     unsigned long *bitmap = ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE];
     unsigned long end, next;
-    uint32_t add;
+    uint32_t add, first_off;
 
     assert(cpu <= smp_cpus);
 
     if (likely(smp_cpus <= BITS_PER_LONG)) {
-        cpu = (cpu == smp_cpus) ? (1 << cpu) - 1 : (1 << cpu);
+        uint32_t shift = 0;
+
+        if (cpu == smp_cpus) {
+            unsigned long mask1, mask2;
+            int bits_left;
+
+            first_off = BIT_WORD(EXCL_BITMAP_GET_OFFSET(addr));
+            shift = (EXCL_BITMAP_GET_OFFSET(addr) & (BITS_PER_LONG-1));
+            bits_left = (shift + cpu) - BITS_PER_LONG;
+
+            if (bits_left <= 0) {
+                mask1 = (1 << cpu) - 1;
+
+                return mask1 & (bitmap[first_off] >> shift);
+            }
+
+            mask2 = (1 << bits_left) - 1;
+            mask1 = (1 << (cpu - bits_left)) - 1;
+
+            return (mask1 & (bitmap[first_off] >> shift)) |
+                   (mask2 & (bitmap[first_off + 1]));
+        } else {
+            first_off = BIT_WORD(EXCL_BITMAP_GET_OFFSET(addr) + cpu);
+            shift = ((EXCL_BITMAP_GET_OFFSET(addr) + cpu) & (BITS_PER_LONG-1));
+
+            return 1 & (bitmap[first_off] >> shift);
+        }
 
         return cpu & (bitmap[BIT_WORD(EXCL_BITMAP_GET_OFFSET(addr))] >>
-                     (EXCL_BITMAP_GET_OFFSET(addr) & (BITS_PER_LONG-1)));
+                     (shift));
     }
 
     add = (cpu == smp_cpus) ? 0 : 1;
@@ -315,7 +358,5 @@ static inline bool cpu_physical_memory_clear_excl_dirty(ram_addr_t addr,
                                 EXCL_BITMAP_GET_OFFSET(addr) + cpu_index, 1);
 }
 
-
-
 #endif
 #endif
-- 
2.5.0

  parent reply	other threads:[~2015-08-14 15:52 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-08-14 15:55 [Qemu-devel] [mttcg RFC v4 0/6] Atomic slow-path for mttcg Alvise Rigo
2015-08-14 15:55 ` [Qemu-devel] [mttcg RFC v4 1/6] cpus: async_run_on_cpu: kick only if needed Alvise Rigo
2015-08-14 15:55 ` [Qemu-devel] [mttcg RFC v4 2/6] cputlb: wrap tlb_flush with a new function Alvise Rigo
2015-08-14 15:55 ` Alvise Rigo [this message]
2015-08-14 15:55 ` [Qemu-devel] [mttcg RFC v4 4/6] softmmu_llsc_template.h: move to multithreading Alvise Rigo
2015-08-14 15:55 ` [Qemu-devel] [mttcg RFC v4 5/6] softmmu_template.h: " Alvise Rigo
2015-08-14 15:55 ` [Qemu-devel] [mttcg RFC v4 6/6] target-arm: Use a runtime helper for excl accesses Alvise Rigo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1439567732-14118-4-git-send-email-a.rigo@virtualopensystems.com \
    --to=a.rigo@virtualopensystems.com \
    --cc=alex.bennee@linaro.org \
    --cc=aurelien@aurel32.net \
    --cc=claudio.fontana@huawei.com \
    --cc=jani.kokkonen@huawei.com \
    --cc=mttcg@listserver.greensocs.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=tech@virtualopensystems.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).