From: Tamas K Lengyel <tklengyel@sec.in.tum.de>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, ian.jackson@eu.citrix.com,
	stefano.stabellini@citrix.com, andres@lagarcavilla.org,
	jbeulich@suse.com, dgdegra@tycho.nsa.gov,
	Tamas K Lengyel <tklengyel@sec.in.tum.de>
Subject: [PATCH RFC v2 02/12] xen/mem_event: Clean out superfluous white-spaces
Date: Wed, 27 Aug 2014 16:06:30 +0200	[thread overview]
Message-ID: <1409148400-14810-3-git-send-email-tklengyel@sec.in.tum.de> (raw)
In-Reply-To: <1409148400-14810-1-git-send-email-tklengyel@sec.in.tum.de>

Signed-off-by: Tamas K Lengyel <tklengyel@sec.in.tum.de>
---
v2: Clean the mem_event header as well.

Signed-off-by: Tamas K Lengyel <tklengyel@sec.in.tum.de>
---
 xen/common/mem_event.c      | 20 ++++++++++----------
 xen/include/xen/mem_event.h |  8 ++++----
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index 604f94f..e22b78e 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -58,7 +58,7 @@ static int mem_event_enable(
     if ( med->ring_page )
         return -EBUSY;
 
-    /* The parameter defaults to zero, and it should be 
+    /* The parameter defaults to zero, and it should be
      * set to something */
     if ( ring_gfn == 0 )
         return -ENOSYS;
@@ -66,7 +66,7 @@ static int mem_event_enable(
     mem_event_ring_lock_init(med);
     mem_event_ring_lock(med);
 
-    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct, 
+    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct,
                                     &med->ring_page);
     if ( rc < 0 )
         goto err;
@@ -98,7 +98,7 @@ static int mem_event_enable(
     return 0;
 
  err:
-    destroy_ring_for_helper(&med->ring_page, 
+    destroy_ring_for_helper(&med->ring_page,
                             med->ring_pg_struct);
     mem_event_ring_unlock(med);
 
@@ -227,7 +227,7 @@ static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
             }
         }
 
-        destroy_ring_for_helper(&med->ring_page, 
+        destroy_ring_for_helper(&med->ring_page,
                                 med->ring_pg_struct);
         mem_event_ring_unlock(med);
     }
@@ -480,7 +480,7 @@ void mem_event_cleanup(struct domain *d)
          * the disable routine to complete. It will also drop
          * all domain refs the wait-queued vcpus are holding.
          * Finally, because this code path involves previously
-         * pausing the domain (domain_kill), unpausing the 
+         * pausing the domain (domain_kill), unpausing the
          * vcpus causes no harm. */
         destroy_waitqueue_head(&d->mem_event->paging.wq);
         (void)mem_event_disable(d, &d->mem_event->paging);
@@ -560,7 +560,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
             if ( p2m->pod.entry_count )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_paging, 
+            rc = mem_event_enable(d, mec, med, _VPF_mem_paging,
                                     HVM_PARAM_PAGING_RING_PFN,
                                     mem_paging_notification);
         }
@@ -580,7 +580,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     }
     break;
 
-    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
+    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
     {
         struct mem_event_domain *med = &d->mem_event->access;
         rc = -EINVAL;
@@ -598,7 +598,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
             if ( !cpu_has_vmx )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
+            rc = mem_event_enable(d, mec, med, _VPF_mem_access,
                                     HVM_PARAM_ACCESS_RING_PFN,
                                     mem_access_notification);
         }
@@ -618,7 +618,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     }
     break;
 
-    case XEN_DOMCTL_MEM_EVENT_OP_SHARING: 
+    case XEN_DOMCTL_MEM_EVENT_OP_SHARING:
     {
         struct mem_event_domain *med = &d->mem_event->share;
         rc = -EINVAL;
@@ -637,7 +637,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
             if ( !hap_enabled(d) )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_sharing, 
+            rc = mem_event_enable(d, mec, med, _VPF_mem_sharing,
                                     HVM_PARAM_SHARING_RING_PFN,
                                     mem_sharing_notification);
         }
diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
index ecf9f64..774909e 100644
--- a/xen/include/xen/mem_event.h
+++ b/xen/include/xen/mem_event.h
@@ -35,19 +35,19 @@ bool_t mem_event_check_ring(struct mem_event_domain *med);
 /* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
  * available space and the caller is a foreign domain. If the guest itself
  * is the caller, -EBUSY is avoided by sleeping on a wait queue to ensure
- * that the ring does not lose future events. 
+ * that the ring does not lose future events.
  *
  * However, the allow_sleep flag can be set to false in cases in which it is ok
  * to lose future events, and thus -EBUSY can be returned to guest vcpus
- * (handle with care!). 
+ * (handle with care!).
  *
  * In general, you must follow a claim_slot() call with either put_request() or
  * cancel_slot(), both of which are guaranteed to
- * succeed. 
+ * succeed.
  */
 int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
                             bool_t allow_sleep);
-static inline int mem_event_claim_slot(struct domain *d, 
+static inline int mem_event_claim_slot(struct domain *d,
                                         struct mem_event_domain *med)
 {
     return __mem_event_claim_slot(d, med, 1);
-- 
2.1.0.rc1
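
For reference, the contract spelled out in the mem_event.h comment touched above is that every successful claim_slot() call must be paired with exactly one put_request() or cancel_slot(). Below is a minimal caller sketch of that pattern; the helper name and the still_wanted flag are hypothetical, it is not part of this patch, and it assumes the declarations from xen/include/xen/mem_event.h and xen/sched.h:

    /* Hypothetical sketch only -- illustrates the claim_slot() /
     * put_request() / cancel_slot() pairing documented in
     * xen/include/xen/mem_event.h. */
    static void example_send_event(struct domain *d,
                                   struct mem_event_domain *med,
                                   mem_event_request_t *req,
                                   bool_t still_wanted)
    {
        /* Guest vcpus sleep on the wait queue; foreign callers may see -EBUSY. */
        if ( mem_event_claim_slot(d, med) )
            return;                              /* no ring, or no space left */

        if ( still_wanted )
            mem_event_put_request(d, med, req);  /* consumes the claimed slot */
        else
            mem_event_cancel_slot(d, med);       /* releases the claimed slot */
    }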


Thread overview: 48+ messages
2014-08-27 14:06 [PATCH RFC v2 00/12] Mem_event and mem_access for ARM Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 01/12] xen: Relocate mem_access and mem_event into common Tamas K Lengyel
2014-08-27 14:17   ` Julien Grall
2014-08-27 14:57     ` Tamas K Lengyel
2014-08-28 10:22   ` Tim Deegan
2014-08-27 14:06 ` Tamas K Lengyel [this message]
2014-08-28 10:22   ` [PATCH RFC v2 02/12] xen/mem_event: Clean out superfluous white-spaces Tim Deegan
2014-08-27 14:06 ` [PATCH RFC v2 03/12] xen/mem_event: Relax error condition on debug builds Tamas K Lengyel
2014-08-27 16:39   ` Julien Grall
2014-08-27 17:00     ` Tamas K Lengyel
2014-08-27 17:02   ` Andres Lagar Cavilla
2014-08-27 21:26     ` Tamas K Lengyel
2014-08-28  6:36     ` Jan Beulich
2014-08-29  4:20       ` Andres Lagar Cavilla
2014-08-27 14:06 ` [PATCH RFC v2 04/12] xen/mem_event: Abstract architecture specific sanity checks Tamas K Lengyel
2014-08-27 15:19   ` Jan Beulich
2014-08-27 17:17     ` Tamas K Lengyel
2014-08-27 21:54       ` Tamas K Lengyel
2014-08-28  6:38         ` Jan Beulich
2014-08-28  8:40           ` Tamas K Lengyel
2014-08-28  8:46             ` Jan Beulich
2014-08-28  8:52               ` Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 05/12] xen/mem_access: Abstract architecture specific sanity check Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 06/12] tools/libxc: Allocate magic page for mem access on ARM Tamas K Lengyel
2014-08-29 20:43   ` Julien Grall
2014-09-04  0:12   ` Stefano Stabellini
2014-08-27 14:06 ` [PATCH RFC v2 07/12] xen/arm: p2m type definitions and changes Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 08/12] xen/arm: Add mem_event domctl and mem_access memop Tamas K Lengyel
2014-08-29 20:57   ` Julien Grall
2014-08-30  8:19     ` Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 09/12] xen/arm: Data abort exception (R/W) mem_events Tamas K Lengyel
2014-08-27 17:01   ` Julien Grall
2014-08-27 17:22     ` Tamas K Lengyel
2014-08-29 21:41   ` Julien Grall
2014-08-30  8:16     ` Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 10/12] xen/arm: Instruction prefetch abort (X) mem_event handling Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 11/12] xen/arm: Enable the compilation of mem_access and mem_event on ARM Tamas K Lengyel
2014-08-27 15:24   ` Jan Beulich
2014-08-27 17:12     ` Tamas K Lengyel
2014-08-28  6:39       ` Jan Beulich
2014-08-28  8:42         ` Tamas K Lengyel
2014-08-28  8:54           ` Jan Beulich
2014-08-28  9:00             ` Tamas K Lengyel
2014-08-27 17:05   ` Daniel De Graaf
2014-08-27 17:13     ` Tamas K Lengyel
2014-08-27 14:06 ` [PATCH RFC v2 12/12] tools/tests: Enable xen-access " Tamas K Lengyel
2014-08-27 15:46 ` [PATCH RFC v2 00/12] Mem_event and mem_access for ARM Andrii Tseglytskyi
2014-08-27 17:05   ` Tamas K Lengyel

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save this message as an mbox file, import it into your mail client,
  and reply-to-all from there.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1409148400-14810-3-git-send-email-tklengyel@sec.in.tum.de \
    --to=tklengyel@sec.in.tum.de \
    --cc=andres@lagarcavilla.org \
    --cc=dgdegra@tycho.nsa.gov \
    --cc=ian.campbell@citrix.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=stefano.stabellini@citrix.com \
    --cc=tim@xen.org \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html
