* [PATCH v2] lockable: Replace locks with lock guard macros
@ 2020-04-02 6:50 Simran Singhal
2020-04-02 7:46 ` no-reply
` (5 more replies)
0 siblings, 6 replies; 9+ messages in thread
From: Simran Singhal @ 2020-04-02 6:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Stefan Hajnoczi, Julia Suvorova, Yuval Shaia
Replace manual lock()/unlock() calls with lock guard macros
(QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
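For illustration, the shape of the change (a minimal sketch; 'mutex' and
'counter' here are placeholders, not code from the tree):
    /* before: manual pairing, easy to miss an unlock on an early return */
    qemu_mutex_lock(&mutex);
    counter++;
    qemu_mutex_unlock(&mutex);
    /* after: the guard unlocks automatically when it goes out of scope */
    QEMU_LOCK_GUARD(&mutex);
    counter++;
    /* or, held only for an explicit block: */
    WITH_QEMU_LOCK_GUARD(&mutex) {
        counter++;
    }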
Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
---
Changes in v2:
-Drop changes in file hw/rdma/rdma_utils.c
hw/hyperv/hyperv.c | 15 ++++++-------
hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
hw/rdma/rdma_rm.c | 3 +--
3 files changed, 33 insertions(+), 35 deletions(-)
diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index 8ca3706f5b..4ddafe1de1 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -15,6 +15,7 @@
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
+#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
@@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
int ret;
MsgHandler *mh;
- qemu_mutex_lock(&handlers_mutex);
+ QEMU_LOCK_GUARD(&handlers_mutex);
QLIST_FOREACH(mh, &msg_handlers, link) {
if (mh->conn_id == conn_id) {
if (handler) {
@@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
g_free_rcu(mh, rcu);
ret = 0;
}
- goto unlock;
+ return ret;
}
}
@@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
} else {
ret = -ENOENT;
}
-unlock:
- qemu_mutex_unlock(&handlers_mutex);
+
return ret;
}
@@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
int ret;
EventFlagHandler *handler;
- qemu_mutex_lock(&handlers_mutex);
+ QEMU_LOCK_GUARD(&handlers_mutex);
QLIST_FOREACH(handler, &event_flag_handlers, link) {
if (handler->conn_id == conn_id) {
if (notifier) {
@@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
g_free_rcu(handler, rcu);
ret = 0;
}
- goto unlock;
+ return ret;
}
}
@@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
} else {
ret = -ENOENT;
}
-unlock:
- qemu_mutex_unlock(&handlers_mutex);
+
return ret;
}
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index 3dd39fe1a7..db7e5c8be5 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
struct ibv_wc wc[2];
RdmaProtectedGSList *cqe_ctx_list;
- qemu_mutex_lock(&rdma_dev_res->lock);
- do {
- ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+ WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+ do {
+ ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
- trace_rdma_poll_cq(ne, ibcq);
+ trace_rdma_poll_cq(ne, ibcq);
- for (i = 0; i < ne; i++) {
- bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
- if (unlikely(!bctx)) {
- rdma_error_report("No matching ctx for req %"PRId64,
- wc[i].wr_id);
- continue;
- }
+ for (i = 0; i < ne; i++) {
+ bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+ if (unlikely(!bctx)) {
+ rdma_error_report("No matching ctx for req %"PRId64,
+ wc[i].wr_id);
+ continue;
+ }
- comp_handler(bctx->up_ctx, &wc[i]);
+ comp_handler(bctx->up_ctx, &wc[i]);
- if (bctx->backend_qp) {
- cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
- } else {
- cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
- }
+ if (bctx->backend_qp) {
+ cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+ } else {
+ cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+ }
- rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
- rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
- g_free(bctx);
- }
- total_ne += ne;
- } while (ne > 0);
- atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
- qemu_mutex_unlock(&rdma_dev_res->lock);
+ rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+ rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+ g_free(bctx);
+ }
+ total_ne += ne;
+ } while (ne > 0);
+ atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+ }
if (ne < 0) {
rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index 7e9ea283c9..60957f88db 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
trace_rdma_res_tbl_dealloc(tbl->name, handle);
- qemu_mutex_lock(&tbl->lock);
+ QEMU_LOCK_GUARD(&tbl->lock);
if (handle < tbl->tbl_sz) {
clear_bit(handle, tbl->bitmap);
tbl->used--;
}
- qemu_mutex_unlock(&tbl->lock);
}
int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
--
2.17.1
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-02 6:50 [PATCH v2] lockable: Replace locks with lock guard macros Simran Singhal
@ 2020-04-02 7:46 ` no-reply
2020-04-13 10:00 ` Yuval Shaia
` (4 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: no-reply @ 2020-04-02 7:46 UTC (permalink / raw)
To: singhalsimran0; +Cc: stefanha, jusual, qemu-devel, yuval.shaia.ml
Patchew URL: https://patchew.org/QEMU/20200402065035.GA15477@simran-Inspiron-5558/
Hi,
This series failed the asan build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.
=== TEST SCRIPT BEGIN ===
#!/bin/bash
export ARCH=x86_64
make docker-image-fedora V=1 NETWORK=1
time make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu J=14 NETWORK=1
=== TEST SCRIPT END ===
CC x86_64-softmmu/hw/i386/pc_piix.o
CC x86_64-softmmu/hw/i386/pc_q35.o
CC x86_64-softmmu/hw/i386/microvm.o
/tmp/qemu-test/src/hw/hyperv/hyperv.c:495:5: error: unused variable 'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
QEMU_LOCK_GUARD(&handlers_mutex);
^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from macro 'QEMU_LOCK_GUARD'
---
<scratch space>:24:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
/tmp/qemu-test/src/hw/hyperv/hyperv.c:568:5: error: unused variable 'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
QEMU_LOCK_GUARD(&handlers_mutex);
^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from macro 'QEMU_LOCK_GUARD'
---
qemu_lockable_auto__COUNTER__
^
2 errors generated.
make[1]: *** [/tmp/qemu-test/src/rules.mak:69: hw/hyperv/hyperv.o] Error 1
make[1]: *** Waiting for unfinished jobs....
/tmp/qemu-test/src/hw/rdma/rdma_rm.c:150:5: error: unused variable 'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
QEMU_LOCK_GUARD(&tbl->lock);
^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from macro 'QEMU_LOCK_GUARD'
---
qemu_lockable_auto__COUNTER__
^
1 error generated.
make[1]: *** [/tmp/qemu-test/src/rules.mak:69: hw/rdma/rdma_rm.o] Error 1
make: *** [Makefile:527: x86_64-softmmu/all] Error 2
Traceback (most recent call last):
File "./tests/docker/docker.py", line 664, in <module>
sys.exit(main())
---
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['sudo', '-n', 'docker', 'run', '--label', 'com.qemu.instance.uuid=528913c5fbdd4e42bf1946168442f34c', '-u', '1003', '--security-opt', 'seccomp=unconfined', '--rm', '-e', 'TARGET_LIST=x86_64-softmmu', '-e', 'EXTRA_CONFIGURE_OPTS=', '-e', 'V=', '-e', 'J=14', '-e', 'DEBUG=', '-e', 'SHOW_ENV=', '-e', 'CCACHE_DIR=/var/tmp/ccache', '-v', '/home/patchew2/.cache/qemu-docker-ccache:/var/tmp/ccache:z', '-v', '/var/tmp/patchew-tester-tmp-hg7wzjtd/src/docker-src.2020-04-02-03.43.13.4475:/var/tmp/qemu:z,ro', 'qemu:fedora', '/var/tmp/qemu/run', 'test-debug']' returned non-zero exit status 2.
filter=--filter=label=com.qemu.instance.uuid=528913c5fbdd4e42bf1946168442f34c
make[1]: *** [docker-run] Error 1
make[1]: Leaving directory `/var/tmp/patchew-tester-tmp-hg7wzjtd/src'
make: *** [docker-run-test-debug@fedora] Error 2
real 3m40.193s
user 0m8.084s
The full log is available at
http://patchew.org/logs/20200402065035.GA15477@simran-Inspiron-5558/testing.asan/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-02 6:50 [PATCH v2] lockable: Replace locks with lock guard macros Simran Singhal
2020-04-02 7:46 ` no-reply
@ 2020-04-13 10:00 ` Yuval Shaia
2020-04-14 8:11 ` Marcel Apfelbaum
` (3 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Yuval Shaia @ 2020-04-13 10:00 UTC (permalink / raw)
To: Simran Singhal; +Cc: Stefan Hajnoczi, Julia Suvorova, QEMU Developers
On Thu, 2 Apr 2020 at 09:50, Simran Singhal <singhalsimran0@gmail.com>
wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
> Changes in v2:
> -Drop changes in file hw/rdma/rdma_utils.c
>
So I guess we can expect to see this back soon, right?
Ignore my r-b and t-b for v1; I did not encounter the build errors, and this
one is okay too.
For the hw/rdma stuff:
Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Thanks,
Yuval
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-02 6:50 [PATCH v2] lockable: Replace locks with lock guard macros Simran Singhal
2020-04-02 7:46 ` no-reply
2020-04-13 10:00 ` Yuval Shaia
@ 2020-04-14 8:11 ` Marcel Apfelbaum
2020-04-18 12:03 ` Marcel Apfelbaum
` (2 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Marcel Apfelbaum @ 2020-04-14 8:11 UTC (permalink / raw)
To: Simran Singhal, qemu-devel; +Cc: Stefan Hajnoczi, Julia Suvorova, Yuval Shaia
Hi Simran,
On 4/2/20 9:50 AM, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> [...]
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Thanks,
Marcel
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-02 6:50 [PATCH v2] lockable: Replace locks with lock guard macros Simran Singhal
` (2 preceding siblings ...)
2020-04-14 8:11 ` Marcel Apfelbaum
@ 2020-04-18 12:03 ` Marcel Apfelbaum
2020-04-19 2:46 ` Julia Suvorova
2020-04-20 9:13 ` Philippe Mathieu-Daudé
2020-04-24 10:51 ` Stefan Hajnoczi
5 siblings, 1 reply; 9+ messages in thread
From: Marcel Apfelbaum @ 2020-04-18 12:03 UTC (permalink / raw)
To: Simran Singhal, qemu-devel, rkagan
Cc: Stefan Hajnoczi, Julia Suvorova, Yuval Shaia
Hi Simran,
On 4/2/20 9:50 AM, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
> Changes in v2:
> -Drop changes in file hw/rdma/rdma_utils.c
>
> hw/hyperv/hyperv.c | 15 ++++++-------
> hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
> hw/rdma/rdma_rm.c | 3 +--
> 3 files changed, 33 insertions(+), 35 deletions(-)
>
> diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
> index 8ca3706f5b..4ddafe1de1 100644
> --- a/hw/hyperv/hyperv.c
> +++ b/hw/hyperv/hyperv.c
> @@ -15,6 +15,7 @@
> #include "sysemu/kvm.h"
> #include "qemu/bitops.h"
> #include "qemu/error-report.h"
> +#include "qemu/lockable.h"
> #include "qemu/queue.h"
> #include "qemu/rcu.h"
> #include "qemu/rcu_queue.h"
> @@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
> int ret;
> MsgHandler *mh;
>
> - qemu_mutex_lock(&handlers_mutex);
> + QEMU_LOCK_GUARD(&handlers_mutex);
It does not pass compilation:
export ARCH=x86_64
make docker-image-fedora V=1 NETWORK=1
make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu NETWORK=1
Error:
CC x86_64-softmmu/hw/net/virtio-net.o
/tmp/qemu-test/src/hw/hyperv/hyperv.c:495:5: error: unused variable
'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
QEMU_LOCK_GUARD(&handlers_mutex);
^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from
macro 'QEMU_LOCK_GUARD'
g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
^
<scratch space>:24:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
/tmp/qemu-test/src/hw/hyperv/hyperv.c:568:5: error: unused variable
'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
QEMU_LOCK_GUARD(&handlers_mutex);
^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from
macro 'QEMU_LOCK_GUARD'
g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
^
<scratch space>:39:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
2 errors generated.
I suggest splitting it into a pvrdma patch and a hyperv patch anyway.
It would be nice to get an ack from the hyperv maintainer; adding Roman.
Thanks,
Marcel
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-18 12:03 ` Marcel Apfelbaum
@ 2020-04-19 2:46 ` Julia Suvorova
2020-04-19 6:34 ` Marcel Apfelbaum
0 siblings, 1 reply; 9+ messages in thread
From: Julia Suvorova @ 2020-04-19 2:46 UTC (permalink / raw)
To: Marcel Apfelbaum
Cc: Simran Singhal, QEMU Developers, rkagan, Yuval Shaia,
Stefan Hajnoczi
On Sat, Apr 18, 2020 at 2:03 PM Marcel Apfelbaum
<marcel.apfelbaum@gmail.com> wrote:
>
> Hi Simran,
>
> On 4/2/20 9:50 AM, Simran Singhal wrote:
> > Replace manual lock()/unlock() calls with lock guard macros
> > (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
> >
> > Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> > [...]
>
> It does not pass compilation:
> export ARCH=x86_64
> make docker-image-fedora V=1 NETWORK=1
> make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu NETWORK=1
This is a problem with the macros themselves, not with this patch.
It is fixed (both the '##' concatenation problem and the warnings) in
[PATCH v5 0/2] Replaced locks with lock guard macros
and this patch should be based on it.
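Roughly, the issue and the fix look like this (a sketch, not the exact
lockable.h code):
    /*
     * '##' pastes its operands before macro-expanding them, so
     * qemu_lockable_auto##__COUNTER__ yields the literal name
     * qemu_lockable_auto__COUNTER__ every time, and clang then flags
     * the guard variable as unused.
     *
     * The fix is an extra expansion level, so __COUNTER__ is substituted
     * first and each guard gets a unique name, plus G_GNUC_UNUSED to
     * silence -Wunused-variable:
     */
    #define glue_(a, b) a##b
    #define glue(a, b)  glue_(a, b)
    /* glue(qemu_lockable_auto, __COUNTER__) -> qemu_lockable_auto0, ... */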
Best regards, Julia Suvorova.
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-19 2:46 ` Julia Suvorova
@ 2020-04-19 6:34 ` Marcel Apfelbaum
0 siblings, 0 replies; 9+ messages in thread
From: Marcel Apfelbaum @ 2020-04-19 6:34 UTC (permalink / raw)
To: Julia Suvorova
Cc: Stefan Hajnoczi, Simran Singhal, Yuval Shaia, QEMU Developers,
rkagan
Hi Julia,
On 4/19/20 5:46 AM, Julia Suvorova wrote:
> On Sat, Apr 18, 2020 at 2:03 PM Marcel Apfelbaum
> <marcel.apfelbaum@gmail.com> wrote:
>> Hi Simran,
>>
>> On 4/2/20 9:50 AM, Simran Singhal wrote:
>>> Replace manual lock()/unlock() calls with lock guard macros
>>> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>>>
>>> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
>>> [...]
>> It does not pass compilation:
>> export ARCH=x86_64
>> make docker-image-fedora V=1 NETWORK=1
>> make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu NETWORK=1
> This is a problem with the macros themselves, not with this patch.
> It is fixed (both the '##' concatenation problem and the warnings) in
> [PATCH v5 0/2] Replaced locks with lock guard macros
> and this patch should be based on it.
Thanks for the pointer.
I'll wait for the fix to be merged then.
Thanks,
Marcel
>
> Best regards, Julia Suvorova.
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-02 6:50 [PATCH v2] lockable: Replace locks with lock guard macros Simran Singhal
` (3 preceding siblings ...)
2020-04-18 12:03 ` Marcel Apfelbaum
@ 2020-04-20 9:13 ` Philippe Mathieu-Daudé
2020-04-24 10:51 ` Stefan Hajnoczi
5 siblings, 0 replies; 9+ messages in thread
From: Philippe Mathieu-Daudé @ 2020-04-20 9:13 UTC (permalink / raw)
To: Simran Singhal, qemu-devel; +Cc: Stefan Hajnoczi, Julia Suvorova, Yuval Shaia
On 4/2/20 8:50 AM, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
You can give a hint to reviewers by adding:
This patch is based on:
"Replace locks with lock guard macros"
https://www.mail-archive.com/qemu-devel@nongnu.org/msg694412.html
And a similar hint for the build bots:
Based-on: <20200404042108.389635-1-dnbrdsky@gmail.com>
You should Cc Paolo Bonzini on your v3, since he'll likely queue the
base series.
Patch looks good otherwise.
* Re: [PATCH v2] lockable: Replace locks with lock guard macros
2020-04-02 6:50 [PATCH v2] lockable: Replace locks with lock guard macros Simran Singhal
` (4 preceding siblings ...)
2020-04-20 9:13 ` Philippe Mathieu-Daudé
@ 2020-04-24 10:51 ` Stefan Hajnoczi
5 siblings, 0 replies; 9+ messages in thread
From: Stefan Hajnoczi @ 2020-04-24 10:51 UTC (permalink / raw)
To: Simran Singhal; +Cc: Julia Suvorova, qemu-devel, Yuval Shaia
On Thu, Apr 02, 2020 at 12:20:35PM +0530, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
> Changes in v2:
> -Drop changes in file hw/rdma/rdma_utils.c
>
> hw/hyperv/hyperv.c | 15 ++++++-------
> hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
> hw/rdma/rdma_rm.c | 3 +--
> 3 files changed, 33 insertions(+), 35 deletions(-)
Thanks, applied to my block-next tree:
https://github.com/stefanha/qemu/commits/block-next
Stefan
end of thread
Thread overview: 9+ messages
2020-04-02 6:50 [PATCH v2] lockable: Replace locks with lock guard macros Simran Singhal
2020-04-02 7:46 ` no-reply
2020-04-13 10:00 ` Yuval Shaia
2020-04-14 8:11 ` Marcel Apfelbaum
2020-04-18 12:03 ` Marcel Apfelbaum
2020-04-19 2:46 ` Julia Suvorova
2020-04-19 6:34 ` Marcel Apfelbaum
2020-04-20 9:13 ` Philippe Mathieu-Daudé
2020-04-24 10:51 ` Stefan Hajnoczi