From: Corey Bryant <coreyb@linux.vnet.ibm.com>
Date: Thu, 23 May 2013 13:44:43 -0400
Message-Id: <1369331087-22345-4-git-send-email-coreyb@linux.vnet.ibm.com>
In-Reply-To: <1369331087-22345-1-git-send-email-coreyb@linux.vnet.ibm.com>
References: <1369331087-22345-1-git-send-email-coreyb@linux.vnet.ibm.com>
Subject: [Qemu-devel] [PATCH 3/7] vnvram: VNVRAM bottom-half r/w scheduling support
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, aliguori@us.ibm.com, stefanb@linux.vnet.ibm.com,
    Corey Bryant <coreyb@linux.vnet.ibm.com>, mdroth@linux.vnet.ibm.com,
    lcapitulino@redhat.com, jschopp@linux.vnet.ibm.com, stefanha@redhat.com

Provide support for scheduling and executing VNVRAM read/write
requests.  Requests are queued under a mutex, and a bottom-half is
scheduled so that the actual reads/writes are performed from the QEMU
main thread; callers block until their request has completed.

Signed-off-by: Corey Bryant <coreyb@linux.vnet.ibm.com>
---
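Note for reviewers (not part of the commit): below is a rough sketch of
how a caller is expected to drive the new entry points.  It assumes
vnvram_bh has been created elsewhere in the series, presumably with
qemu_bh_new(vnvram_rwrequest_callback, NULL), and that "vnvram",
"entry", "blob" and "blob_size" are placeholders for values produced by
the registration code in the earlier patches; error handling is
omitted.

    /* Synchronous write from a non-main thread, e.g. a vCPU thread */
    VNVRAMRWRequest *rwr;
    int rc;

    rwr = vnvram_rwrequest_init_write(vnvram, entry, blob, blob_size);
    rc = vnvram_rwrequest_schedule(rwr);  /* blocks until the BH has run */

    qemu_mutex_destroy(&rwr->completion_mutex);
    qemu_cond_destroy(&rwr->completion);
    g_free(rwr);

    /* Synchronous read; the blob and its size come back through the
     * pointers packed into the request (cleanup as above) */
    char *blob_r;
    uint32_t blob_r_size;

    rwr = vnvram_rwrequest_init_read(vnvram, entry, &blob_r, &blob_r_size);
    rc = vnvram_rwrequest_schedule(rwr);

Because vnvram_rwrequest_schedule() blocks on rwr->completion until the
bottom-half has executed the request, it must not be called from the
main thread that runs the bottom-half, or it will deadlock.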
 vnvram.c | 153 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 153 insertions(+), 0 deletions(-)

diff --git a/vnvram.c b/vnvram.c
index 37b7070..4157482 100644
--- a/vnvram.c
+++ b/vnvram.c
@@ -14,6 +14,7 @@
 #include "vnvram.h"
 #include "block/block.h"
 #include "monitor/monitor.h"
+#include "qemu/thread.h"
 
 /*
 #define VNVRAM_DEBUG
@@ -68,6 +69,31 @@ typedef struct VNVRAMDrvEntry {
     VNVRAM_ENTRY_DATA
 } QEMU_PACKED VNVRAMDrvEntry;
 
+/* Used to pass read/write requests to the bottom-half function */
+typedef struct VNVRAMRWRequest {
+    VNVRAM *vnvram;
+    VNVRAMEntry *entry;
+    bool is_write;
+    char **blob_r;
+    uint32_t *blob_r_size;
+    char *blob_w;
+    uint32_t blob_w_size;
+    int rc;
+    bool done;     /* set once the bottom-half has executed the request */
+
+    QemuMutex completion_mutex;
+    QemuCond completion;
+
+    QSIMPLEQ_ENTRY(VNVRAMRWRequest) list;
+} VNVRAMRWRequest;
+
+/* A mutex-protected queue where read/write requests are stored */
+static QemuMutex vnvram_rwrequests_mutex;
+static QSIMPLEQ_HEAD(, VNVRAMRWRequest) vnvram_rwrequests =
+    QSIMPLEQ_HEAD_INITIALIZER(vnvram_rwrequests);
+
+static QEMUBH *vnvram_bh;
+
 static int vnvram_drv_entry_create(VNVRAM *, VNVRAMEntry *, uint64_t, uint32_t);
 static int vnvram_drv_entry_update(VNVRAM *, VNVRAMEntry *, uint64_t, uint32_t);
 static int vnvram_register_entry_internal(VNVRAM *, const VNVRAMEntryName *,
@@ -679,3 +705,130 @@ static VNVRAMEntry *vnvram_find_entry(VNVRAM *vnvram,
 
     return NULL;
 }
+
+/*********************** VNVRAM rwrequest ****************************/
+/* High-level VNVRAM functions that schedule and kick off read/write */
+/* requests.                                                         */
+/*********************************************************************/
+
+/*
+ * VNVRAMRWRequest initialization for read requests
+ */
+static VNVRAMRWRequest *vnvram_rwrequest_init_read(VNVRAM *vnvram,
+                                                   VNVRAMEntry *entry,
+                                                   char **blob,
+                                                   uint32_t *blob_size)
+{
+    VNVRAMRWRequest *rwr;
+
+    rwr = g_new0(VNVRAMRWRequest, 1);
+
+    rwr->is_write = false;
+    rwr->entry = entry;
+    rwr->vnvram = vnvram;
+    rwr->blob_r = blob;
+    rwr->blob_r_size = blob_size;
+
+    qemu_mutex_init(&rwr->completion_mutex);
+    qemu_cond_init(&rwr->completion);
+
+    return rwr;
+}
+
+/*
+ * VNVRAMRWRequest initialization for write requests
+ */
+static VNVRAMRWRequest *vnvram_rwrequest_init_write(VNVRAM *vnvram,
+                                                    VNVRAMEntry *entry,
+                                                    char *blob,
+                                                    uint32_t blob_size)
+{
+    VNVRAMRWRequest *rwr;
+
+    rwr = g_new0(VNVRAMRWRequest, 1);
+
+    rwr->is_write = true;
+    rwr->entry = entry;
+    rwr->vnvram = vnvram;
+    rwr->blob_w = blob;
+    rwr->blob_w_size = blob_size;
+
+    qemu_mutex_init(&rwr->completion_mutex);
+    qemu_cond_init(&rwr->completion);
+
+    return rwr;
+}
+
+/*
+ * Execute a read or write of blob data based on a VNVRAMRWRequest
+ */
+static int vnvram_rwrequest_exec(VNVRAMRWRequest *rwr)
+{
+    int rc = 0;
+
+    if (rwr->is_write) {
+        rc = vnvram_drv_entry_write_blob(rwr->vnvram, rwr->entry,
+                                         rwr->blob_w, rwr->blob_w_size);
+    } else {
+        rc = vnvram_drv_entry_read_blob(rwr->vnvram, rwr->entry,
+                                        rwr->blob_r, rwr->blob_r_size);
+    }
+
+    rwr->rc = rc;
+
+    qemu_mutex_lock(&rwr->completion_mutex);
+    rwr->done = true;
+    qemu_cond_signal(&rwr->completion);
+    qemu_mutex_unlock(&rwr->completion_mutex);
+
+    return rc;
+}
+
+/*
+ * Bottom-half callback that is invoked by QEMU's main thread to
+ * process VNVRAM read/write requests.
+ */
+static void vnvram_rwrequest_callback(void *opaque)
+{
+    VNVRAMRWRequest *rwr, *next;
+
+    qemu_mutex_lock(&vnvram_rwrequests_mutex);
+
+    QSIMPLEQ_FOREACH_SAFE(rwr, &vnvram_rwrequests, list, next) {
+        QSIMPLEQ_REMOVE(&vnvram_rwrequests, rwr, VNVRAMRWRequest, list);
+
+        /* Drop the queue lock so new requests can still be queued */
+        qemu_mutex_unlock(&vnvram_rwrequests_mutex);
+
+        vnvram_rwrequest_exec(rwr);
+
+        qemu_mutex_lock(&vnvram_rwrequests_mutex);
+    }
+
+    qemu_mutex_unlock(&vnvram_rwrequests_mutex);
+}
+
+/*
+ * Schedules a bottom-half to read or write a blob to the VNVRAM drive.
+ */
+static int vnvram_rwrequest_schedule(VNVRAMRWRequest *rwr)
+{
+    int rc = 0;
+
+    qemu_mutex_lock(&vnvram_rwrequests_mutex);
+    QSIMPLEQ_INSERT_TAIL(&vnvram_rwrequests, rwr, list);
+    qemu_mutex_unlock(&vnvram_rwrequests_mutex);
+
+    qemu_bh_schedule(vnvram_bh);
+
+    /* All reads/writes are synchronous so we wait for completion */
+    qemu_mutex_lock(&rwr->completion_mutex);
+    while (!rwr->done) {
+        qemu_cond_wait(&rwr->completion, &rwr->completion_mutex);
+    }
+    qemu_mutex_unlock(&rwr->completion_mutex);
+
+    rc = rwr->rc;
+
+    return rc;
+}
-- 
1.7.1