From: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
To: Simran Singhal <singhalsimran0@gmail.com>,
qemu-devel@nongnu.org, rkagan@virtuozzo.com
Cc: Stefan Hajnoczi <stefanha@gmail.com>,
Julia Suvorova <jusual@mail.ru>,
Yuval Shaia <yuval.shaia.ml@gmail.com>
Subject: Re: [PATCH v2] lockable: Replace locks with lock guard macros
Date: Sat, 18 Apr 2020 15:03:29 +0300
Message-ID: <f0eec74a-387d-79ce-b23f-d7f16747f9ad@gmail.com>
In-Reply-To: <20200402065035.GA15477@simran-Inspiron-5558>
Hi Simran,
On 4/2/20 9:50 AM, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
> Changes in v2:
> -Drop changes in file hw/rdma/rdma_utils.c
>
> hw/hyperv/hyperv.c | 15 ++++++-------
> hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
> hw/rdma/rdma_rm.c | 3 +--
> 3 files changed, 33 insertions(+), 35 deletions(-)
>
> diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
> index 8ca3706f5b..4ddafe1de1 100644
> --- a/hw/hyperv/hyperv.c
> +++ b/hw/hyperv/hyperv.c
> @@ -15,6 +15,7 @@
> #include "sysemu/kvm.h"
> #include "qemu/bitops.h"
> #include "qemu/error-report.h"
> +#include "qemu/lockable.h"
> #include "qemu/queue.h"
> #include "qemu/rcu.h"
> #include "qemu/rcu_queue.h"
> @@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
> int ret;
> MsgHandler *mh;
>
> - qemu_mutex_lock(&handlers_mutex);
> + QEMU_LOCK_GUARD(&handlers_mutex);
It does not pass compilation:
export ARCH=x86_64
make docker-image-fedora V=1 NETWORK=1
make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu NETWORK=1
Error:
CC x86_64-softmmu/hw/net/virtio-net.o
/tmp/qemu-test/src/hw/hyperv/hyperv.c:495:5: error: unused variable
'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
QEMU_LOCK_GUARD(&handlers_mutex);
^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from
macro 'QEMU_LOCK_GUARD'
g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
^
<scratch space>:24:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
/tmp/qemu-test/src/hw/hyperv/hyperv.c:568:5: error: unused variable
'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
QEMU_LOCK_GUARD(&handlers_mutex);
^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from
macro 'QEMU_LOCK_GUARD'
g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
^
<scratch space>:39:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
2 errors generated.
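As a side note, the diagnostic shows the variable really is named
'qemu_lockable_auto__COUNTER__': the '##' paste in QEMU_LOCK_GUARD happens
before __COUNTER__ is expanded, so the counter never turns into a number.
A small standalone sketch of that preprocessor behaviour (illustrative
names only, not the actual lockable.h code; if I remember correctly,
QEMU's glue()/xglue() helpers provide exactly this extra expansion level):

#include <stdio.h>

/*
 * '##' suppresses macro expansion of its operands, so __COUNTER__ is
 * pasted as a literal token unless an extra level of indirection is used.
 */
#define PASTE_RAW(a, b) a##b            /* pastes the literal tokens */
#define PASTE(a, b)     PASTE_RAW(a, b) /* expands a and b first     */

#define STRINGIFY_(x) #x
#define STRINGIFY(x)  STRINGIFY_(x)

int main(void)
{
    /* Prints "guard___COUNTER__": the macro name survives the paste. */
    printf("%s\n", STRINGIFY(PASTE_RAW(guard_, __COUNTER__)));
    /* Prints "guard_0" (or another number): __COUNTER__ expands first. */
    printf("%s\n", STRINGIFY(PASTE(guard_, __COUNTER__)));
    return 0;
}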
I suggest splitting it into a pvrdma patch and a hyperv patch anyway.
It would be nice to get an ack from the hyperv maintainer; adding Roman.
Thanks,
Marcel
> QLIST_FOREACH(mh, &msg_handlers, link) {
> if (mh->conn_id == conn_id) {
> if (handler) {
> @@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
> g_free_rcu(mh, rcu);
> ret = 0;
> }
> - goto unlock;
> + return ret;
> }
> }
>
> @@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
> } else {
> ret = -ENOENT;
> }
> -unlock:
> - qemu_mutex_unlock(&handlers_mutex);
> +
> return ret;
> }
>
> @@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
> int ret;
> EventFlagHandler *handler;
>
> - qemu_mutex_lock(&handlers_mutex);
> + QEMU_LOCK_GUARD(&handlers_mutex);
> QLIST_FOREACH(handler, &event_flag_handlers, link) {
> if (handler->conn_id == conn_id) {
> if (notifier) {
> @@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
> g_free_rcu(handler, rcu);
> ret = 0;
> }
> - goto unlock;
> + return ret;
> }
> }
>
> @@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
> } else {
> ret = -ENOENT;
> }
> -unlock:
> - qemu_mutex_unlock(&handlers_mutex);
> +
> return ret;
> }
>
> diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
> index 3dd39fe1a7..db7e5c8be5 100644
> --- a/hw/rdma/rdma_backend.c
> +++ b/hw/rdma/rdma_backend.c
> @@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
> struct ibv_wc wc[2];
> RdmaProtectedGSList *cqe_ctx_list;
>
> - qemu_mutex_lock(&rdma_dev_res->lock);
> - do {
> - ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
> + WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
> + do {
> + ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
>
> - trace_rdma_poll_cq(ne, ibcq);
> + trace_rdma_poll_cq(ne, ibcq);
>
> - for (i = 0; i < ne; i++) {
> - bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> - if (unlikely(!bctx)) {
> - rdma_error_report("No matching ctx for req %"PRId64,
> - wc[i].wr_id);
> - continue;
> - }
> + for (i = 0; i < ne; i++) {
> + bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> + if (unlikely(!bctx)) {
> + rdma_error_report("No matching ctx for req %"PRId64,
> + wc[i].wr_id);
> + continue;
> + }
>
> - comp_handler(bctx->up_ctx, &wc[i]);
> + comp_handler(bctx->up_ctx, &wc[i]);
>
> - if (bctx->backend_qp) {
> - cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
> - } else {
> - cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
> - }
> + if (bctx->backend_qp) {
> + cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
> + } else {
> + cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
> + }
>
> - rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
> - rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> - g_free(bctx);
> - }
> - total_ne += ne;
> - } while (ne > 0);
> - atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
> - qemu_mutex_unlock(&rdma_dev_res->lock);
> + rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
> + rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> + g_free(bctx);
> + }
> + total_ne += ne;
> + } while (ne > 0);
> + atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
> + }
>
> if (ne < 0) {
> rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
> diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
> index 7e9ea283c9..60957f88db 100644
> --- a/hw/rdma/rdma_rm.c
> +++ b/hw/rdma/rdma_rm.c
> @@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
> {
> trace_rdma_res_tbl_dealloc(tbl->name, handle);
>
> - qemu_mutex_lock(&tbl->lock);
> + QEMU_LOCK_GUARD(&tbl->lock);
>
> if (handle < tbl->tbl_sz) {
> clear_bit(handle, tbl->bitmap);
> tbl->used--;
> }
>
> - qemu_mutex_unlock(&tbl->lock);
> }
>
> int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
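For anyone skimming the diff, both files follow the same shape of change;
a condensed before/after sketch (illustrative function and lock names,
not the actual QEMU code):

/* Within the QEMU tree: */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/lockable.h"

/* Before: manual lock/unlock with a goto-based single exit path. */
static int lookup_before(QemuMutex *lock, int key)
{
    int ret = -ENOENT;

    qemu_mutex_lock(lock);
    if (key == 0) {
        ret = 0;
        goto unlock;
    }
    /* ... more work under the lock ... */
unlock:
    qemu_mutex_unlock(lock);
    return ret;
}

/* After: the guard releases the mutex on every return path. */
static int lookup_after(QemuMutex *lock, int key)
{
    QEMU_LOCK_GUARD(lock);

    if (key == 0) {
        return 0;
    }
    /* ... more work under the lock ... */
    return -ENOENT;
}

/* Scoped variant, as in rdma_poll_cq(): unlocked when the block exits. */
static void scoped_example(QemuMutex *lock)
{
    WITH_QEMU_LOCK_GUARD(lock) {
        /* critical section */
    }
    /* mutex already released here */
}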