From: Yuval Shaia <yuval.shaia@oracle.com>
To: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Cc: dgilbert@redhat.com, armbru@redhat.com, qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 02/10] hw/rdma: Introduce locked qlist
Date: Thu, 7 Feb 2019 12:28:57 +0200
Message-ID: <20190207102856.GA2945@lap1>
In-Reply-To: <c8791020-7fa0-e452-16f1-974ad021dd1d@gmail.com>
On Thu, Feb 07, 2019 at 11:05:23AM +0200, Marcel Apfelbaum wrote:
> Hi Yuval,
>
> On 1/31/19 3:08 PM, Yuval Shaia wrote:
> > To make the code more readable, move the handling of the locked list
> > into generic functions.
> >
> > Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
> > ---
> > hw/rdma/rdma_backend.c | 20 +++++--------------
> > hw/rdma/rdma_backend_defs.h | 8 ++------
> > hw/rdma/rdma_utils.c | 39 +++++++++++++++++++++++++++++++++++++
> > hw/rdma/rdma_utils.h | 9 +++++++++
> > 4 files changed, 55 insertions(+), 21 deletions(-)
> >
> > diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
> > index 5f60856d19..2f6372f8f0 100644
> > --- a/hw/rdma/rdma_backend.c
> > +++ b/hw/rdma/rdma_backend.c
> > @@ -527,9 +527,7 @@ static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
> > bctx->up_ctx = ctx;
> > bctx->sge = *sge;
> > - qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
> > - qlist_append_int(backend_dev->recv_mads_list.list, bctx_id);
> > - qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
> > + rdma_locked_list_append_int64(&backend_dev->recv_mads_list, bctx_id);
> > return 0;
> > }
> > @@ -913,23 +911,19 @@ static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
> > static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
> > RdmaCmMuxMsg *msg)
> > {
> > - QObject *o_ctx_id;
> > unsigned long cqe_ctx_id;
> > BackendCtx *bctx;
> > char *mad;
> > trace_mad_message("recv", msg->umad.mad, msg->umad_len);
> > - qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
> > - o_ctx_id = qlist_pop(backend_dev->recv_mads_list.list);
> > - qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
> > - if (!o_ctx_id) {
> > + cqe_ctx_id = rdma_locked_list_pop_int64(&backend_dev->recv_mads_list);
> > + if (cqe_ctx_id == -ENOENT) {
> > rdma_warn_report("No more free MADs buffers, waiting for a while");
> > sleep(THR_POLL_TO);
> > return;
> > }
> > - cqe_ctx_id = qnum_get_uint(qobject_to(QNum, o_ctx_id));
> > bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
> > if (unlikely(!bctx)) {
> > rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
> > @@ -994,8 +988,7 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
> > return -EIO;
> > }
> > - qemu_mutex_init(&backend_dev->recv_mads_list.lock);
> > - backend_dev->recv_mads_list.list = qlist_new();
> > + rdma_locked_list_init(&backend_dev->recv_mads_list);
> > enable_rdmacm_mux_async(backend_dev);
> > @@ -1010,10 +1003,7 @@ static void mad_fini(RdmaBackendDev *backend_dev)
> > {
> > disable_rdmacm_mux_async(backend_dev);
> > qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
> > - if (backend_dev->recv_mads_list.list) {
> > - qlist_destroy_obj(QOBJECT(backend_dev->recv_mads_list.list));
> > - qemu_mutex_destroy(&backend_dev->recv_mads_list.lock);
> > - }
> > + rdma_locked_list_destroy(&backend_dev->recv_mads_list);
> > }
> > int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
> > diff --git a/hw/rdma/rdma_backend_defs.h b/hw/rdma/rdma_backend_defs.h
> > index 15ae8b970e..bec0457f25 100644
> > --- a/hw/rdma/rdma_backend_defs.h
> > +++ b/hw/rdma/rdma_backend_defs.h
> > @@ -20,6 +20,7 @@
> > #include "chardev/char-fe.h"
> > #include <infiniband/verbs.h>
> > #include "contrib/rdmacm-mux/rdmacm-mux.h"
> > +#include "rdma_utils.h"
> > typedef struct RdmaDeviceResources RdmaDeviceResources;
> > @@ -30,11 +31,6 @@ typedef struct RdmaBackendThread {
> > bool is_running; /* Set by the thread to report its status */
> > } RdmaBackendThread;
> > -typedef struct RecvMadList {
> > - QemuMutex lock;
> > - QList *list;
> > -} RecvMadList;
> > -
> > typedef struct RdmaCmMux {
> > CharBackend *chr_be;
> > int can_receive;
> > @@ -48,7 +44,7 @@ typedef struct RdmaBackendDev {
> > struct ibv_context *context;
> > struct ibv_comp_channel *channel;
> > uint8_t port_num;
> > - RecvMadList recv_mads_list;
> > + LockedList recv_mads_list;
> > RdmaCmMux rdmacm_mux;
> > } RdmaBackendDev;
> > diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c
> > index f1c980c6be..a2a4ea2a15 100644
> > --- a/hw/rdma/rdma_utils.c
> > +++ b/hw/rdma/rdma_utils.c
> > @@ -14,6 +14,8 @@
> > */
> > #include "qemu/osdep.h"
> > +#include "qapi/qmp/qlist.h"
> > +#include "qapi/qmp/qnum.h"
> > #include "trace.h"
> > #include "rdma_utils.h"
> > @@ -55,3 +57,40 @@ void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
> > pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0);
> > }
> > }
> > +
> > +void rdma_locked_list_init(LockedList *list)
> > +{
> > + qemu_mutex_init(&list->lock);
> > + list->list = qlist_new();
> > +}
> > +
> > +void rdma_locked_list_destroy(LockedList *list)
> > +{
> > + if (list->list) {
> > + qlist_destroy_obj(QOBJECT(list->list));
> > + qemu_mutex_destroy(&list->lock);
> > + list->list = NULL;
> > + }
> > +}
> > +
> > +void rdma_locked_list_append_int64(LockedList *list, int64_t value)
> > +{
> > + qemu_mutex_lock(&list->lock);
> > + qlist_append_int(list->list, value);
> > + qemu_mutex_unlock(&list->lock);
> > +}
> > +
> > +int64_t rdma_locked_list_pop_int64(LockedList *list)
> > +{
> > + QObject *obj;
> > +
> > + qemu_mutex_lock(&list->lock);
> > + obj = qlist_pop(list->list);
> > + qemu_mutex_unlock(&list->lock);
> > +
> > + if (!obj) {
> > + return -ENOENT;
> > + }
> > +
> > + return qnum_get_uint(qobject_to(QNum, obj));
> > +}
> > diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h
> > index acd148837f..4ba9956f81 100644
> > --- a/hw/rdma/rdma_utils.h
> > +++ b/hw/rdma/rdma_utils.h
> > @@ -29,8 +29,17 @@
> > #define rdma_info_report(fmt, ...) \
> > info_report("%s: " fmt, "rdma", ## __VA_ARGS__)
> > +typedef struct LockedList {
>
> The naming is a little off, maybe SynchronizedList?
I believe renaming it would break amending the other patches in this
patchset, so only if it makes a big difference will I consider changing it.
>
> The more pressing issue, it doesn't seem this code is related to RDMA.
> Does anybody think we should move this code to more appropriate place?
Well, I gathered several "general" utilities in rdma_utils.h. If they are
needed elsewhere I will be glad to relocate them.
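
For reference, here is a minimal sketch of how a caller would use these
helpers. Only the LockedList type and the rdma_locked_list_* API come from
this patch; the surrounding struct and function are made up for
illustration:

    #include "qemu/osdep.h"
    #include "rdma_utils.h"

    typedef struct DemoDev {
        LockedList ctx_ids;   /* hypothetical consumer of LockedList */
    } DemoDev;

    static void demo_usage(DemoDev *dev)
    {
        int64_t id;

        rdma_locked_list_init(&dev->ctx_ids);

        /* Append is safe from any thread; the list takes its own lock */
        rdma_locked_list_append_int64(&dev->ctx_ids, 42);

        /* Pop returns -ENOENT when the list is empty */
        id = rdma_locked_list_pop_int64(&dev->ctx_ids);
        if (id != -ENOENT) {
            /* ... use id ... */
        }

        rdma_locked_list_destroy(&dev->ctx_ids);
    }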
>
> Thanks,
> Marcel
>
> > + QemuMutex lock;
> > + QList *list;
> > +} LockedList;
>
>
>
>
> > +
> > void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen);
> > void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len);
> > +void rdma_locked_list_init(LockedList *list);
> > +void rdma_locked_list_destroy(LockedList *list);
> > +void rdma_locked_list_append_int64(LockedList *list, int64_t value);
> > +int64_t rdma_locked_list_pop_int64(LockedList *list);
> > static inline void addrconf_addr_eui48(uint8_t *eui, const char *addr)
> > {
>
>