From: Phil Dennis-Jordan <phil@philjordan.eu>
To: qemu-devel@nongnu.org
Cc: agraf@csgraf.de, phil@philjordan.eu, peter.maydell@linaro.org,
pbonzini@redhat.com, rad@semihalf.com, quic_llindhol@quicinc.com,
marcin.juszkiewicz@linaro.org, stefanha@redhat.com,
mst@redhat.com, slp@redhat.com, richard.henderson@linaro.org,
eduardo@habkost.net, marcel.apfelbaum@gmail.com,
gaosong@loongson.cn, jiaxun.yang@flygoat.com,
chenhuacai@kernel.org, kwolf@redhat.com, hreitz@redhat.com,
philmd@linaro.org, shorne@gmail.com, palmer@dabbelt.com,
alistair.francis@wdc.com, bmeng.cn@gmail.com,
liwei1518@gmail.com, dbarboza@ventanamicro.com,
zhiwei_liu@linux.alibaba.com, jcmvbkbc@gmail.com,
marcandre.lureau@redhat.com, berrange@redhat.com,
akihiko.odaki@daynix.com, qemu-arm@nongnu.org,
qemu-block@nongnu.org, qemu-riscv@nongnu.org
Subject: [PATCH v4 14/15] hw/block/virtio-blk: Replace request free function with g_free
Date: Thu, 24 Oct 2024 12:28:12 +0200
Message-ID: <20241024102813.9855-15-phil@philjordan.eu>
In-Reply-To: <20241024102813.9855-1-phil@philjordan.eu>

The virtio_blk_free_request() function has been a one-liner forwarding
to g_free() for a while now. We may as well call g_free() on the request
pointer directly.
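
For reference, the wrapper being removed is just:

    void virtio_blk_free_request(VirtIOBlockReq *req)
    {
        g_free(req);
    }

so substituting g_free(req) at every call site is behaviour-preserving.
The typical completion path at the call sites, as seen throughout the
diff below, becomes:

    virtio_blk_req_complete(req, status);
    g_free(req);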
Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu>
---
hw/block/virtio-blk.c | 43 +++++++++++++++-------------------
hw/vmapple/virtio-blk.c | 2 +-
include/hw/virtio/virtio-blk.h | 1 -
3 files changed, 20 insertions(+), 26 deletions(-)
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 9e8337bb639..40d2c9bc591 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -50,11 +50,6 @@ static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
req->mr_next = NULL;
}
-void virtio_blk_free_request(VirtIOBlockReq *req)
-{
- g_free(req);
-}
-
void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
VirtIOBlock *s = req->dev;
@@ -93,7 +88,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
if (acct_failed) {
block_acct_failed(blk_get_stats(s->blk), &req->acct);
}
- virtio_blk_free_request(req);
+ g_free(req);
}
blk_error_action(s->blk, action, is_read, error);
@@ -136,7 +131,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
block_acct_done(blk_get_stats(s->blk), &req->acct);
- virtio_blk_free_request(req);
+ g_free(req);
}
}
@@ -151,7 +146,7 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
block_acct_done(blk_get_stats(s->blk), &req->acct);
- virtio_blk_free_request(req);
+ g_free(req);
}
static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
@@ -169,7 +164,7 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
if (is_write_zeroes) {
block_acct_done(blk_get_stats(s->blk), &req->acct);
}
- virtio_blk_free_request(req);
+ g_free(req);
}
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
@@ -214,7 +209,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
fail:
virtio_blk_req_complete(req, status);
- virtio_blk_free_request(req);
+ g_free(req);
}
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
@@ -612,7 +607,7 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
g_free(data->zone_report_data.zones);
g_free(data);
}
@@ -661,7 +656,7 @@ static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
return;
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
}
static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
@@ -677,7 +672,7 @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
}
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
}
static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
@@ -719,7 +714,7 @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
return 0;
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
return err_status;
}
@@ -750,7 +745,7 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret)
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
g_free(data);
}
@@ -788,7 +783,7 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
return err_status;
}
@@ -855,7 +850,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
block_acct_invalid(blk_get_stats(s->blk),
is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
- virtio_blk_free_request(req);
+ g_free(req);
return 0;
}
@@ -911,7 +906,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
VIRTIO_BLK_ID_BYTES));
iov_from_buf(in_iov, in_num, 0, serial, size);
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
- virtio_blk_free_request(req);
+ g_free(req);
break;
}
case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
@@ -943,7 +938,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
out_len > sizeof(dwz_hdr))) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
- virtio_blk_free_request(req);
+ g_free(req);
return 0;
}
@@ -960,7 +955,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
is_write_zeroes);
if (err_status != VIRTIO_BLK_S_OK) {
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
}
break;
@@ -975,7 +970,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
if (!vbk->handle_unknown_request ||
!vbk->handle_unknown_request(req, mrb, type)) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
- virtio_blk_free_request(req);
+ g_free(req);
}
}
}
@@ -998,7 +993,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
while ((req = virtio_blk_get_request(s, vq))) {
if (virtio_blk_handle_request(req, &mrb)) {
virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
+ g_free(req);
break;
}
}
@@ -1048,7 +1043,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
while (req) {
next = req->next;
virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
+ g_free(req);
req = next;
}
break;
@@ -1131,7 +1126,7 @@ static void virtio_blk_reset(VirtIODevice *vdev)
/* No other threads can access req->vq here */
virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
+ g_free(req);
}
}
diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c
index 3a8b47bc55f..9f84c4851f5 100644
--- a/hw/vmapple/virtio-blk.c
+++ b/hw/vmapple/virtio-blk.c
@@ -58,7 +58,7 @@ static bool vmapple_virtio_blk_handle_unknown_request(VirtIOBlockReq *req,
qemu_log_mask(LOG_UNIMP, "%s: Barrier requests are currently no-ops\n",
__func__);
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
- virtio_blk_free_request(req);
+ g_free(req);
return true;
default:
return false;
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 28d5046ea6c..dcb2c89aed5 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -109,7 +109,6 @@ typedef struct VirtIOBlkClass {
} VirtIOBlkClass;
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
-void virtio_blk_free_request(VirtIOBlockReq *req);
void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status);
#endif
--
2.39.3 (Apple Git-145)