From: Paul Durrant <paul.durrant@citrix.com>
To: qemu-block@nongnu.org, qemu-devel@nongnu.org,
	xen-devel@lists.xenproject.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Anthony Perard <anthony.perard@citrix.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Kevin Wolf <kwolf@redhat.com>, Max Reitz <mreitz@redhat.com>
Subject: [Qemu-devel] [PATCH 13/18] xen: purge 'blk' and 'ioreq' from function names in dataplane/xen-qdisk.c
Date: Wed, 21 Nov 2018 15:12:06 +0000
Message-ID: <20181121151211.15997-14-paul.durrant@citrix.com>
In-Reply-To: <20181121151211.15997-1-paul.durrant@citrix.com>

This is a purely cosmetic patch that purges the remaining uses of 'blk' and
'ioreq' from local function names in dataplane/xen-qdisk.c.

No functional change.
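
For reference, the renames applied by this patch are:

  ioreq_reset           -> reset_request
  ioreq_start           -> start_request
  ioreq_finish          -> finish_request
  ioreq_release         -> release_request
  ioreq_parse           -> parse_request
  ioreq_grant_copy      -> copy_request
  ioreq_runio_qemu_aio  -> do_aio
  qemu_aio_complete     -> complete_aio
  blk_split_discard     -> split_discard
  blk_send_response_one -> send_response_one
  blk_send_response_all -> send_response_all
  blk_get_request       -> get_request
  blk_handle_requests   -> handle_requests
  blk_bh                -> xen_qdisk_dataplane_bh
  blk_event             -> xen_qdisk_dataplane_event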

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Anthony Perard <anthony.perard@citrix.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Max Reitz <mreitz@redhat.com>
---
 hw/block/dataplane/xen-qdisk.c | 86 +++++++++++++++++++++---------------------
 1 file changed, 43 insertions(+), 43 deletions(-)

diff --git a/hw/block/dataplane/xen-qdisk.c b/hw/block/dataplane/xen-qdisk.c
index 50a48b6f5f..94785eeff3 100644
--- a/hw/block/dataplane/xen-qdisk.c
+++ b/hw/block/dataplane/xen-qdisk.c
@@ -57,7 +57,7 @@ struct XenQdiskDataPlane {
     AioContext *ctx;
 };
 
-static void ioreq_reset(XenQdiskRequest *request)
+static void reset_request(XenQdiskRequest *request)
 {
     memset(&request->req, 0, sizeof(request->req));
     request->status = 0;
@@ -76,7 +76,7 @@ static void ioreq_reset(XenQdiskRequest *request)
     qemu_iovec_reset(&request->v);
 }
 
-static XenQdiskRequest *ioreq_start(XenQdiskDataPlane *dataplane)
+static XenQdiskRequest *start_request(XenQdiskDataPlane *dataplane)
 {
     XenQdiskRequest *request = NULL;
 
@@ -101,7 +101,7 @@ out:
     return request;
 }
 
-static void ioreq_finish(XenQdiskRequest *request)
+static void finish_request(XenQdiskRequest *request)
 {
     XenQdiskDataPlane *dataplane = request->dataplane;
 
@@ -111,12 +111,12 @@ static void ioreq_finish(XenQdiskRequest *request)
     dataplane->requests_finished++;
 }
 
-static void ioreq_release(XenQdiskRequest *request, bool finish)
+static void release_request(XenQdiskRequest *request, bool finish)
 {
     XenQdiskDataPlane *dataplane = request->dataplane;
 
     QLIST_REMOVE(request, list);
-    ioreq_reset(request);
+    reset_request(request);
     request->dataplane = dataplane;
     QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
     if (finish) {
@@ -130,7 +130,7 @@ static void ioreq_release(XenQdiskRequest *request, bool finish)
  * translate request into iovec + start offset
  * do sanity checks along the way
  */
-static int ioreq_parse(XenQdiskRequest *request)
+static int parse_request(XenQdiskRequest *request)
 {
     XenQdiskDataPlane *dataplane = request->dataplane;
     size_t len;
@@ -191,7 +191,7 @@ err:
     return -1;
 }
 
-static int ioreq_grant_copy(XenQdiskRequest *request)
+static int copy_request(XenQdiskRequest *request)
 {
     XenQdiskDataPlane *dataplane = request->dataplane;
     XenDevice *xendev = dataplane->xendev;
@@ -240,9 +240,9 @@ static int ioreq_grant_copy(XenQdiskRequest *request)
     return 0;
 }
 
-static int ioreq_runio_qemu_aio(XenQdiskRequest *request);
+static int do_aio(XenQdiskRequest *request);
 
-static void qemu_aio_complete(void *opaque, int ret)
+static void complete_aio(void *opaque, int ret)
 {
     XenQdiskRequest *request = opaque;
     XenQdiskDataPlane *dataplane = request->dataplane;
@@ -259,7 +259,7 @@ static void qemu_aio_complete(void *opaque, int ret)
     request->aio_inflight--;
     if (request->presync) {
         request->presync = 0;
-        ioreq_runio_qemu_aio(request);
+        do_aio(request);
         goto done;
     }
     if (request->aio_inflight > 0) {
@@ -270,7 +270,7 @@ static void qemu_aio_complete(void *opaque, int ret)
     case BLKIF_OP_READ:
         /* in case of failure request->aio_errors is increased */
         if (ret == 0) {
-            ioreq_grant_copy(request);
+            copy_request(request);
         }
         qemu_vfree(request->buf);
         break;
@@ -286,7 +286,7 @@ static void qemu_aio_complete(void *opaque, int ret)
     }
 
     request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
-    ioreq_finish(request);
+    finish_request(request);
 
     switch (request->req.operation) {
     case BLKIF_OP_WRITE:
@@ -311,9 +311,8 @@ done:
     aio_context_release(dataplane->ctx);
 }
 
-static bool blk_split_discard(XenQdiskRequest *request,
-                              blkif_sector_t sector_number,
-                              uint64_t nr_sectors)
+static bool split_discard(XenQdiskRequest *request,
+                          blkif_sector_t sector_number, uint64_t nr_sectors)
 {
     XenQdiskDataPlane *dataplane = request->dataplane;
     int64_t byte_offset;
@@ -336,7 +335,7 @@ static bool blk_split_discard(XenQdiskRequest *request,
         byte_chunk = byte_remaining > limit ? limit : byte_remaining;
         request->aio_inflight++;
         blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
-                         qemu_aio_complete, request);
+                         complete_aio, request);
         byte_remaining -= byte_chunk;
         byte_offset += byte_chunk;
     } while (byte_remaining > 0);
@@ -344,7 +343,7 @@ static bool blk_split_discard(XenQdiskRequest *request,
     return true;
 }
 
-static int ioreq_runio_qemu_aio(XenQdiskRequest *request)
+static int do_aio(XenQdiskRequest *request)
 {
     XenQdiskDataPlane *dataplane = request->dataplane;
 
@@ -352,14 +351,14 @@ static int ioreq_runio_qemu_aio(XenQdiskRequest *request)
     if (request->req.nr_segments &&
         (request->req.operation == BLKIF_OP_WRITE ||
          request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-        ioreq_grant_copy(request)) {
+        copy_request(request)) {
         qemu_vfree(request->buf);
         goto err;
     }
 
     request->aio_inflight++;
     if (request->presync) {
-        blk_aio_flush(request->dataplane->blk, qemu_aio_complete, request);
+        blk_aio_flush(request->dataplane->blk, complete_aio, request);
         return 0;
     }
 
@@ -370,7 +369,7 @@ static int ioreq_runio_qemu_aio(XenQdiskRequest *request)
                          request->v.size, BLOCK_ACCT_READ);
         request->aio_inflight++;
         blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
-                       qemu_aio_complete, request);
+                       complete_aio, request);
         break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
@@ -385,12 +384,12 @@ static int ioreq_runio_qemu_aio(XenQdiskRequest *request)
                          BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
         request->aio_inflight++;
         blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
-                        qemu_aio_complete, request);
+                        complete_aio, request);
         break;
     case BLKIF_OP_DISCARD:
     {
         struct blkif_request_discard *req = (void *)&request->req;
-        if (!blk_split_discard(request, req->sector_number, req->nr_sectors)) {
+        if (!split_discard(request, req->sector_number, req->nr_sectors)) {
             goto err;
         }
         break;
@@ -400,17 +399,17 @@ static int ioreq_runio_qemu_aio(XenQdiskRequest *request)
         goto err;
     }
 
-    qemu_aio_complete(request, 0);
+    complete_aio(request, 0);
 
     return 0;
 
 err:
-    ioreq_finish(request);
+    finish_request(request);
     request->status = BLKIF_RSP_ERROR;
     return -1;
 }
 
-static int blk_send_response_one(XenQdiskRequest *request)
+static int send_response_one(XenQdiskRequest *request)
 {
     XenQdiskDataPlane *dataplane = request->dataplane;
     int send_notify = 0;
@@ -466,15 +465,15 @@ static int blk_send_response_one(XenQdiskRequest *request)
 }
 
 /* walk finished list, send outstanding responses, free requests */
-static void blk_send_response_all(XenQdiskDataPlane *dataplane)
+static void send_response_all(XenQdiskDataPlane *dataplane)
 {
     XenQdiskRequest *request;
     int send_notify = 0;
 
     while (!QLIST_EMPTY(&dataplane->finished)) {
         request = QLIST_FIRST(&dataplane->finished);
-        send_notify += blk_send_response_one(request);
-        ioreq_release(request, true);
+        send_notify += send_response_one(request);
+        release_request(request, true);
     }
     if (send_notify) {
         xen_device_notify_event_channel(dataplane->xendev,
@@ -482,8 +481,8 @@ static void blk_send_response_all(XenQdiskDataPlane *dataplane)
     }
 }
 
-static int blk_get_request(XenQdiskDataPlane *dataplane,
-                           XenQdiskRequest *request, RING_IDX rc)
+static int get_request(XenQdiskDataPlane *dataplane,
+                       XenQdiskRequest *request, RING_IDX rc)
 {
     switch (dataplane->protocol) {
     case BLKIF_PROTOCOL_NATIVE: {
@@ -513,7 +512,7 @@ static int blk_get_request(XenQdiskDataPlane *dataplane,
     return 0;
 }
 
-static void blk_handle_requests(XenQdiskDataPlane *dataplane)
+static void handle_requests(XenQdiskDataPlane *dataplane)
 {
     RING_IDX rc, rp;
     XenQdiskRequest *request;
@@ -524,22 +523,22 @@ static void blk_handle_requests(XenQdiskDataPlane *dataplane)
     rp = dataplane->rings.common.sring->req_prod;
     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    blk_send_response_all(dataplane);
+    send_response_all(dataplane);
     while (rc != rp) {
         /* pull request from ring */
         if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
             break;
         }
-        request = ioreq_start(dataplane);
+        request = start_request(dataplane);
         if (request == NULL) {
             dataplane->more_work++;
             break;
         }
-        blk_get_request(dataplane, request, rc);
+        get_request(dataplane, request, rc);
         dataplane->rings.common.req_cons = ++rc;
 
         /* parse them */
-        if (ioreq_parse(request) != 0) {
+        if (parse_request(request) != 0) {
 
             switch (request->req.operation) {
             case BLKIF_OP_READ:
@@ -557,15 +556,15 @@ static void blk_handle_requests(XenQdiskDataPlane *dataplane)
                 break;
             };
 
-            if (blk_send_response_one(request)) {
+            if (send_response_one(request)) {
                 xen_device_notify_event_channel(dataplane->xendev,
                                                 dataplane->event_channel);
             }
-            ioreq_release(request, false);
+            release_request(request, false);
             continue;
         }
 
-        ioreq_runio_qemu_aio(request);
+        do_aio(request);
     }
 
     if (dataplane->more_work &&
@@ -574,16 +573,16 @@ static void blk_handle_requests(XenQdiskDataPlane *dataplane)
     }
 }
 
-static void blk_bh(void *opaque)
+static void xen_qdisk_dataplane_bh(void *opaque)
 {
     XenQdiskDataPlane *dataplane = opaque;
 
     aio_context_acquire(dataplane->ctx);
-    blk_handle_requests(dataplane);
+    handle_requests(dataplane);
     aio_context_release(dataplane->ctx);
 }
 
-static void blk_event(void *opaque)
+static void xen_qdisk_dataplane_event(void *opaque)
 {
     XenQdiskDataPlane *dataplane = opaque;
 
@@ -612,7 +611,8 @@ XenQdiskDataPlane *xen_qdisk_dataplane_create(XenDevice *xendev,
     } else {
         dataplane->ctx = qemu_get_aio_context();
     }
-    dataplane->bh = aio_bh_new(dataplane->ctx, blk_bh, dataplane);
+    dataplane->bh = aio_bh_new(dataplane->ctx, xen_qdisk_dataplane_bh,
+                               dataplane);
 
     return dataplane;
 }
@@ -718,7 +718,7 @@ void xen_qdisk_dataplane_start(XenQdiskDataPlane *dataplane,
 
     dataplane->event_channel =
         xen_device_bind_event_channel(xendev, event_channel,
-                                      blk_event, dataplane,
+                                      xen_qdisk_dataplane_event, dataplane,
                                       &error_fatal);
 
     aio_context_acquire(dataplane->ctx);
-- 
2.11.0

Thread overview: 79+ messages
2018-11-21 15:11 [Qemu-devel] [PATCH 00/18] Xen PV backend 'qdevification' Paul Durrant
2018-11-21 15:11 ` [Qemu-devel] [PATCH 01/18] xen: re-name XenDevice to XenLegacyDevice Paul Durrant
2018-11-28 16:06   ` Anthony PERARD
2018-11-21 15:11 ` [Qemu-devel] [PATCH 02/18] xen: introduce new 'XenBus' and 'XenDevice' object hierarchy Paul Durrant
2018-11-28 16:19   ` [Qemu-devel] [Qemu-block] " Kevin Wolf
2018-11-28 16:26     ` Paul Durrant
2018-11-28 16:28       ` Paul Durrant
2018-11-28 16:28       ` Stefano Stabellini
2018-11-28 16:29         ` Paul Durrant
2018-11-28 16:39           ` Kevin Wolf
2018-11-28 16:45             ` Paul Durrant
2018-11-28 16:46               ` Paul Durrant
2018-11-29  9:04                 ` Kevin Wolf
2018-11-28 17:01       ` Eric Blake
2018-11-28 17:04         ` Paul Durrant
2018-11-28 17:10   ` [Qemu-devel] " Anthony PERARD
2018-11-28 17:17     ` Paul Durrant
2018-11-28 17:32       ` Anthony PERARD
2018-11-21 15:11 ` [Qemu-devel] [PATCH 03/18] xen: introduce 'xen-qdisk' Paul Durrant
2018-11-29 16:05   ` Anthony PERARD
2018-12-04 15:20     ` Paul Durrant
2018-12-04 15:49       ` Anthony PERARD
2018-12-04 15:50         ` Paul Durrant
2018-12-04 17:14       ` Paul Durrant
2018-11-21 15:11 ` [Qemu-devel] [PATCH 04/18] xen: create xenstore areas for XenDevice-s Paul Durrant
2018-11-29 18:48   ` Anthony PERARD
2018-12-05 12:05     ` Paul Durrant
2018-12-05 12:43       ` Paul Durrant
2018-12-05 13:58         ` Anthony PERARD
2018-12-05 14:24           ` Paul Durrant
2018-12-05 16:28       ` Anthony PERARD
2018-11-21 15:11 ` [Qemu-devel] [PATCH 05/18] xen: add xenstore watcher infrastructure Paul Durrant
2018-12-03 14:42   ` Anthony PERARD
2018-12-05 15:24     ` Paul Durrant
2018-11-21 15:11 ` [Qemu-devel] [PATCH 06/18] xen: add grant table interface for XenDevice-s Paul Durrant
2018-12-03 15:45   ` Anthony PERARD
2018-12-05 16:12     ` Paul Durrant
2018-11-21 15:12 ` [Qemu-devel] [PATCH 07/18] xen: add event channel " Paul Durrant
2018-12-03 16:24   ` Anthony PERARD
2018-12-04 14:24     ` Anthony PERARD
2018-12-05 16:16       ` Paul Durrant
2018-11-21 15:12 ` [Qemu-devel] [PATCH 08/18] xen: duplicate xen_disk.c as basis of dataplane/xen-qdisk.c Paul Durrant
2018-12-03 16:35   ` Anthony PERARD
2018-12-03 16:42     ` Anthony PERARD
2018-11-21 15:12 ` [Qemu-devel] [PATCH 09/18] xen: remove unnecessary code from dataplane/xen-qdisk.c Paul Durrant
2018-12-03 16:58   ` Anthony PERARD
2018-11-21 15:12 ` [Qemu-devel] [PATCH 10/18] xen: add header and build dataplane/xen-qdisk.c Paul Durrant
2018-12-03 18:09   ` Anthony PERARD
2018-12-05 17:31     ` Paul Durrant
2018-11-21 15:12 ` [Qemu-devel] [PATCH 11/18] xen: remove 'XenBlkDev' and 'blkdev' names from dataplane/xen-qdisk Paul Durrant
2018-12-04 11:05   ` Anthony PERARD
2018-11-21 15:12 ` [Qemu-devel] [PATCH 12/18] xen: remove 'ioreq' struct/variable/field names from dataplane/xen-qdisk.c Paul Durrant
2018-12-04 11:34   ` Anthony PERARD
2018-11-21 15:12 ` Paul Durrant [this message]
2018-12-04 12:10   ` [Qemu-devel] [PATCH 13/18] xen: purge 'blk' and 'ioreq' from function names in dataplane/xen-qdisk.c Anthony PERARD
2018-12-05 17:28     ` Paul Durrant
2018-11-21 15:12 ` [Qemu-devel] [PATCH 14/18] xen: add implementations of xen-qdisk connect and disconnect functions Paul Durrant
2018-11-28 16:34   ` Kevin Wolf
2018-11-28 16:40     ` Paul Durrant
2018-11-29  9:00       ` Kevin Wolf
2018-11-29  9:33         ` Paul Durrant
2018-11-29 10:46           ` Kevin Wolf
2018-11-29 10:47             ` Paul Durrant
2018-12-04 12:33   ` Anthony PERARD
2018-12-06 12:27     ` Paul Durrant
2018-11-21 15:12 ` [Qemu-devel] [PATCH 15/18] xen: add a mechanism to automatically create XenDevice-s Paul Durrant
2018-12-04 15:35   ` Anthony PERARD
2018-12-06 12:36     ` Paul Durrant
2018-12-06 15:24       ` Anthony PERARD
2018-12-06 15:36         ` Paul Durrant
2018-11-21 15:12 ` [Qemu-devel] [PATCH 16/18] xen: automatically create XenQdiskDevice-s Paul Durrant
2018-12-04 16:40   ` Anthony PERARD
2018-12-06 13:06     ` Paul Durrant
2018-11-21 15:12 ` [Qemu-devel] [PATCH 17/18] MAINTAINERS: add myself as a Xen maintainer Paul Durrant
2018-11-27 19:05   ` Stefano Stabellini
2018-11-29 14:00   ` Philippe Mathieu-Daudé
2018-11-29 14:01     ` Paul Durrant
2018-12-04 16:42   ` Anthony PERARD
2018-11-21 15:12 ` [Qemu-devel] [PATCH 18/18] xen: remove the legacy 'xen_disk' backend Paul Durrant
