xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Wei Liu <wei.liu2@citrix.com>
To: xen-devel@lists.xen.org
Cc: Wei Liu <wei.liu2@citrix.com>,
	keir@xen.org, ian.campbell@citrix.com, jbeulich@suse.com,
	david.vrabel@citrix.com
Subject: [RFC PATCH V4 16/18] Implement 3-level event channel routines
Date: Tue, 5 Mar 2013 12:30:38 +0000	[thread overview]
Message-ID: <1362486640-14707-17-git-send-email-wei.liu2@citrix.com> (raw)
In-Reply-To: <1362486640-14707-1-git-send-email-wei.liu2@citrix.com>

The 3-level event channel ABI is fully functional at this point, so set the
corresponding bit in the ABI bitmap as well.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/common/event_channel.c |  124 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 108 insertions(+), 16 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 4cf172b..504d769 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -33,7 +33,22 @@
 #include <xsm/xsm.h>
 
 /* A bitmap of supported extended event channel ABIs */
-uint64_t extended_event_channel = EVTCHN_EXTENDED_NONE;
+uint64_t extended_event_channel = (EVTCHN_EXTENDED_NONE |
+                                   EVTCHN_EXTENDED_L3);
+
+static inline const char * evtchn_abi_str(unsigned int abi)
+{
+    switch ( abi )
+    {
+    case EVTCHN_EXTENDED_NONE:
+        return "2-level";
+    case EVTCHN_EXTENDED_L3:
+        return "3-level";
+    default:
+        BUG();
+    }
+    return ""; /* make compiler happy */
+}
 
 #define ERROR_EXIT(_errno)                                          \
     do {                                                            \
@@ -625,10 +640,33 @@ out:
     return ret;
 }
 
+static void __check_vcpu_polling(struct vcpu *v, int port)
+{
+    int vcpuid;
+    struct domain *d = v->domain;
+
+    /* Check if some VCPU might be polling for this event. */
+    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+        return;
+
+    /* Wake any interested (or potentially interested) pollers. */
+    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+          vcpuid < d->max_vcpus;
+          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+    {
+        v = d->vcpu[vcpuid];
+        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+             test_and_clear_bit(vcpuid, d->poll_mask) )
+        {
+            v->poll_evtchn = 0;
+            vcpu_unblock(v);
+        }
+    }
+}
+
 static void evtchn_set_pending_l2(struct vcpu *v, int port)
 {
     struct domain *d = v->domain;
-    int vcpuid;
 
     /*
      * The following bit operations must happen in strict order.
@@ -647,23 +685,36 @@ static void evtchn_set_pending_l2(struct vcpu *v, int port)
         vcpu_mark_events_pending(v);
     }
 
-    /* Check if some VCPU might be polling for this event. */
-    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+    __check_vcpu_polling(v, port);
+}
+
+static void evtchn_set_pending_l3(struct vcpu *v, int port)
+{
+    struct domain *d = v->domain;
+    unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+    unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+    if (unlikely(!v->evtchn_pending_sel_l2))
         return;
 
-    /* Wake any interested (or potentially interested) pollers. */
-    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
-          vcpuid < d->max_vcpus;
-          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+    /*
+     * The following bit operations must happen in strict order.
+     * NB. On x86, the atomic bit operations also act as memory barriers.
+     * There is therefore sufficiently strict ordering for this architecture --
+     * others may require explicit memory barriers.
+     */
+
+    if ( test_and_set_bit(port, d->evtchn_pending) )
+         return;
+
+    if ( !test_bit(port, d->evtchn_mask) &&
+         !test_and_set_bit(l2bit, v->evtchn_pending_sel_l2) &&
+         !test_and_set_bit(l1bit, &vcpu_info(v, evtchn_pending_sel)) )
     {
-        v = d->vcpu[vcpuid];
-        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
-             test_and_clear_bit(vcpuid, d->poll_mask) )
-        {
-            v->poll_evtchn = 0;
-            vcpu_unblock(v);
-        }
+        vcpu_mark_events_pending(v);
     }
+
+    __check_vcpu_polling(v, port);
 }
 
 static void evtchn_set_pending(struct vcpu *v, int port)
@@ -675,6 +726,9 @@ static void evtchn_set_pending(struct vcpu *v, int port)
     case EVTCHN_EXTENDED_NONE:
         evtchn_set_pending_l2(v, port);
         break;
+    case EVTCHN_EXTENDED_L3:
+        evtchn_set_pending_l3(v, port);
+        break;
     default:
         BUG();
     }
@@ -975,6 +1029,38 @@ static int evtchn_unmask_l2(unsigned int port)
     return 0;
 }
 
+static int evtchn_unmask_l3(unsigned int port)
+{
+    struct domain *d = current->domain;
+    struct vcpu   *v;
+    unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+    unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+    ASSERT(spin_is_locked(&d->event_lock));
+
+    if ( unlikely(!port_is_valid(d, port)) )
+        return -EINVAL;
+
+    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
+
+    if (unlikely(!v->evtchn_pending_sel_l2))
+        return -EINVAL;
+
+    /*
+     * These operations must happen in strict order. Based on
+     * include/xen/event.h:evtchn_set_pending().
+     */
+    if ( test_and_clear_bit(port, d->evtchn_mask) &&
+         test_bit          (port, d->evtchn_pending) &&
+         !test_and_set_bit (l2bit, v->evtchn_pending_sel_l2) &&
+         !test_and_set_bit (l1bit, &vcpu_info(v, evtchn_pending_sel)) )
+    {
+        vcpu_mark_events_pending(v);
+    }
+
+    return 0;
+}
+
 int evtchn_unmask(unsigned int port)
 {
     struct domain *d = current->domain;
@@ -985,6 +1071,9 @@ int evtchn_unmask(unsigned int port)
     case EVTCHN_EXTENDED_NONE:
         rc = evtchn_unmask_l2(port);
         break;
+    case EVTCHN_EXTENDED_L3:
+        rc = evtchn_unmask_l3(port);
+        break;
     default:
         BUG();
     }
@@ -1546,8 +1635,11 @@ static void domain_dump_evtchn_info(struct domain *d)
     bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                          d->poll_mask, d->max_vcpus);
     printk("Event channel information for domain %d:\n"
+           "Using %s event channel ABI\n"
            "Polling vCPUs: {%s}\n"
-           "    port [p/m]\n", d->domain_id, keyhandler_scratch);
+           "    port [p/m]\n",
+           d->domain_id, evtchn_abi_str(d->evtchn_extended),
+           keyhandler_scratch);
 
     spin_lock(&d->event_lock);
 
-- 
1.7.10.4

  parent reply	other threads:[~2013-03-05 12:30 UTC|newest]

Thread overview: 34+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-03-05 12:30 [RFC PATCH V4] Implement 3-level event channel ABI in Xen Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 01/18] xen: correct BITS_PER_EVTCHN_WORD on arm Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 02/18] Clean up trailing whitespaces Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 03/18] Dynamically allocate d->evtchn Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 04/18] Move event channel macros / struct definition to proper place Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 05/18] Add d->max_evtchns Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 06/18] Add evtchn_is_{pending, masked} and evtchn_clear_pending Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 07/18] Implement extended event channel ABIs query Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 08/18] Define 3-level event channel registration interface Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 09/18] Add evtchn_extended in struct domain Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 10/18] Calculate max event channels for EVTCHN_EXTENDED_L3 Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 11/18] Bump EVTCHNS_PER_BUCKET to 512 Wei Liu
2013-03-15 18:15   ` David Vrabel
2013-03-15 18:37     ` Wei Liu
2013-03-15 18:43       ` David Vrabel
2013-03-05 12:30 ` [RFC PATCH V4 12/18] Update Xen public header xen.h Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 13/18] Genneralized event channel operations Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 14/18] Introduce EVTCHN_WORD_BITORDER macro Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 15/18] Infrastructure to manipulate 3-level event channel pages Wei Liu
2013-03-05 14:22   ` Jan Beulich
2013-03-05 12:30 ` Wei Liu [this message]
2013-03-05 14:28   ` [RFC PATCH V4 16/18] Implement 3-level event channel routines Jan Beulich
2013-03-05 16:07     ` Wei Liu
2013-03-05 16:13       ` Jan Beulich
2013-03-05 12:30 ` [RFC PATCH V4 17/18] Only allow extended event channel on Dom0 and driver domains Wei Liu
2013-03-05 12:30 ` [RFC PATCH V4 18/18] libxl: add evtchn_extended_allowed flag Wei Liu
2013-03-05 13:48   ` Ian Jackson
2013-03-05 17:11     ` Wei Liu
2013-03-05 17:38       ` Ian Jackson
2013-03-05 17:51         ` Wei Liu
2013-03-05 17:56           ` David Vrabel
2013-03-05 18:08             ` Wei Liu
2013-03-06 17:16               ` Wei Liu
2013-03-07 11:23                 ` David Vrabel

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1362486640-14707-17-git-send-email-wei.liu2@citrix.com \
    --to=wei.liu2@citrix.com \
    --cc=david.vrabel@citrix.com \
    --cc=ian.campbell@citrix.com \
    --cc=jbeulich@suse.com \
    --cc=keir@xen.org \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).