From: Olaf Hering <olaf@aepfle.de>
To: xen-devel@lists.xensource.com
Cc: Tim Deegan <tim@xen.org>
Subject: [PATCH] mem_event: add ref counting for free request slots
Date: Mon, 05 Sep 2011 11:18:06 +0200
Message-ID: <b2c8dacb2dc08bd93e49.1315214286@probook.site>

# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1315214140 -7200
# Node ID b2c8dacb2dc08bd93e4926eb0c2c7823d40e2174
# Parent  f1349a968a5ac5577d67ad4a3f3490c580dbe264
mem_event: add ref counting for free request slots

If mem_event_check_ring() is called by several vcpus at the same time,
before any of them has also called mem_event_put_request(), each caller
will conclude that enough free slots are available in the ring, even
though there may not be room for all of their requests.

Record the number of request producers in mem_event_check_ring() to keep
track of available free slots.

Add a new function, mem_event_put_req_producers(), to release a request
slot reserved in mem_event_check_ring(). It is required by
p2m_mem_paging_populate(), which may only modify the p2m type when a free
request slot is available, but which in some cases does not actually have
to produce another request because the same request is known to have been
made earlier by a different vcpu.
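
For illustration, the intended calling pattern is roughly the following
(a minimal sketch, not the exact control flow of
p2m_mem_paging_populate(); the request_already_pending condition stands
in for the real p2m type checks):

    /* Reserve a request slot; bumps d->mem_event.req_producers. */
    if ( mem_event_check_ring(d) )
        return;                         /* ring full, nothing reserved */

    if ( request_already_pending )
    {
        /* Another vcpu already requested this gfn: release the
         * reservation without producing a request. */
        mem_event_put_req_producers(d);
        return;
    }

    /* Produce the request; this consumes the reservation. */
    mem_event_put_request(d, &req);

Each successful mem_event_check_ring() call must be balanced by exactly
one of the two put calls.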

mem_event_check_ring() cannot return a reference to a specific free
request slot because several vcpus could hold such references at once
and the order of their subsequent mem_event_put_request() calls is not
known. As a result, incomplete requests could be consumed by the ring
user.
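
To make the hazard concrete, consider a hypothetical variant in which
the check handed out slot references (slot numbers are illustrative):

    /* vcpu A: reserves slot 0 via check_ring(d)
     * vcpu B: reserves slot 1 via check_ring(d)
     * vcpu B: put_request(d, slot 1) pushes the producer index past
     *         slot 0, which vcpu A has not filled in yet.
     * The consumer may now read the incomplete request in slot 0.
     * Counting producers instead of handing out slot references
     * avoids this ordering problem. */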

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r f1349a968a5a -r b2c8dacb2dc0 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -37,8 +37,6 @@
 #define mem_event_ring_lock(_d)       spin_lock(&(_d)->mem_event.ring_lock)
 #define mem_event_ring_unlock(_d)     spin_unlock(&(_d)->mem_event.ring_lock)
 
-#define MEM_EVENT_RING_THRESHOLD 4
-
 static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
 {
     int rc;
@@ -109,6 +107,7 @@ void mem_event_put_request(struct domain
     req_prod++;
 
     /* Update ring */
+    d->mem_event.req_producers--;
     front_ring->req_prod_pvt = req_prod;
     RING_PUSH_REQUESTS(front_ring);
 
@@ -153,11 +152,18 @@ void mem_event_mark_and_pause(struct vcp
     vcpu_sleep_nosync(v);
 }
 
+void mem_event_put_req_producers(struct domain *d)
+{
+    mem_event_ring_lock(d);
+    d->mem_event.req_producers--;
+    mem_event_ring_unlock(d);
+}
+
 int mem_event_check_ring(struct domain *d)
 {
     struct vcpu *curr = current;
     int free_requests;
-    int ring_full;
+    int ring_full = 1;
 
     if ( !d->mem_event.ring_page )
         return -1;
@@ -165,12 +171,11 @@ int mem_event_check_ring(struct domain *
     mem_event_ring_lock(d);
 
     free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
-    if ( unlikely(free_requests < 2) )
+    if ( d->mem_event.req_producers < free_requests )
     {
-        gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
-        WARN_ON(free_requests == 0);
+        d->mem_event.req_producers++;
+        ring_full = 0;
     }
-    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;
 
     if ( (curr->domain->domain_id == d->domain_id) && ring_full )
     {
diff -r f1349a968a5a -r b2c8dacb2dc0 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -281,7 +281,6 @@ static struct page_info* mem_sharing_all
     vcpu_pause_nosync(v);
     req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
 
-    /* XXX: Need to reserve a request, not just check the ring! */
     if(mem_event_check_ring(d)) return page;
 
     req.gfn = gfn;
diff -r f1349a968a5a -r b2c8dacb2dc0 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -803,6 +803,7 @@ void p2m_mem_paging_populate(struct doma
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
+        mem_event_put_req_producers(d);
         return;
     }
 
diff -r f1349a968a5a -r b2c8dacb2dc0 xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -27,6 +27,7 @@
 /* Pauses VCPU while marking pause flag for mem event */
 void mem_event_mark_and_pause(struct vcpu *v);
 int mem_event_check_ring(struct domain *d);
+void mem_event_put_req_producers(struct domain *d);
 void mem_event_put_request(struct domain *d, mem_event_request_t *req);
 void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
 void mem_event_unpause_vcpus(struct domain *d);
diff -r f1349a968a5a -r b2c8dacb2dc0 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -183,6 +183,7 @@ struct mem_event_domain
 {
     /* ring lock */
     spinlock_t ring_lock;
+    unsigned int req_producers;
     /* shared page */
     mem_event_shared_page_t *shared_page;
     /* shared ring page */
