From: Yuval Shaia <yuval.shaia@oracle.com>
To: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Cc: dgilbert@redhat.com, armbru@redhat.com, qemu-devel@nongnu.org,
eblake@redhat.com
Subject: Re: [Qemu-devel] [PATCH v1 2/9] hw/rdma: Introduce locked qlist
Date: Tue, 12 Feb 2019 09:08:09 +0200
Message-ID: <20190212070808.GA3103@lap1>
In-Reply-To: <19605c6a-56c4-6467-23b6-f59d7a79cf98@gmail.com>
On Mon, Feb 11, 2019 at 10:10:56AM +0200, Marcel Apfelbaum wrote:
>
>
> On 2/10/19 12:45 PM, Yuval Shaia wrote:
> > To make the code more readable, move the handling of the locked list into
> > generic functions.
> >
> > Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
> > ---
> >  hw/rdma/rdma_backend.c      | 20 +++++--------------
> >  hw/rdma/rdma_backend_defs.h |  8 ++------
> >  hw/rdma/rdma_utils.c        | 39 +++++++++++++++++++++++++++++++++++++
> >  hw/rdma/rdma_utils.h        |  9 +++++++++
> >  4 files changed, 55 insertions(+), 21 deletions(-)
> >
> > diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
> > index 5f60856d19..2f6372f8f0 100644
> > --- a/hw/rdma/rdma_backend.c
> > +++ b/hw/rdma/rdma_backend.c
> > @@ -527,9 +527,7 @@ static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
> >      bctx->up_ctx = ctx;
> >      bctx->sge = *sge;
> > 
> > -    qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
> > -    qlist_append_int(backend_dev->recv_mads_list.list, bctx_id);
> > -    qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
> > +    rdma_locked_list_append_int64(&backend_dev->recv_mads_list, bctx_id);
> > 
> >      return 0;
> >  }
> > @@ -913,23 +911,19 @@ static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
> >  static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
> >                                       RdmaCmMuxMsg *msg)
> >  {
> > -    QObject *o_ctx_id;
> >      unsigned long cqe_ctx_id;
> >      BackendCtx *bctx;
> >      char *mad;
> > 
> >      trace_mad_message("recv", msg->umad.mad, msg->umad_len);
> > 
> > -    qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
> > -    o_ctx_id = qlist_pop(backend_dev->recv_mads_list.list);
> > -    qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
> > -    if (!o_ctx_id) {
> > +    cqe_ctx_id = rdma_locked_list_pop_int64(&backend_dev->recv_mads_list);
> > +    if (cqe_ctx_id == -ENOENT) {
> >          rdma_warn_report("No more free MADs buffers, waiting for a while");
> >          sleep(THR_POLL_TO);
> >          return;
> >      }
> > 
> > -    cqe_ctx_id = qnum_get_uint(qobject_to(QNum, o_ctx_id));
> >      bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
> >      if (unlikely(!bctx)) {
> >          rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
> > @@ -994,8 +988,7 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
> >          return -EIO;
> >      }
> > 
> > -    qemu_mutex_init(&backend_dev->recv_mads_list.lock);
> > -    backend_dev->recv_mads_list.list = qlist_new();
> > +    rdma_locked_list_init(&backend_dev->recv_mads_list);
> > 
> >      enable_rdmacm_mux_async(backend_dev);
> > 
> > @@ -1010,10 +1003,7 @@ static void mad_fini(RdmaBackendDev *backend_dev)
> >  {
> >      disable_rdmacm_mux_async(backend_dev);
> >      qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
> > -    if (backend_dev->recv_mads_list.list) {
> > -        qlist_destroy_obj(QOBJECT(backend_dev->recv_mads_list.list));
> > -        qemu_mutex_destroy(&backend_dev->recv_mads_list.lock);
> > -    }
> > +    rdma_locked_list_destroy(&backend_dev->recv_mads_list);
> >  }
> > 
> >  int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
> > diff --git a/hw/rdma/rdma_backend_defs.h b/hw/rdma/rdma_backend_defs.h
> > index 15ae8b970e..bec0457f25 100644
> > --- a/hw/rdma/rdma_backend_defs.h
> > +++ b/hw/rdma/rdma_backend_defs.h
> > @@ -20,6 +20,7 @@
> >  #include "chardev/char-fe.h"
> >  #include <infiniband/verbs.h>
> >  #include "contrib/rdmacm-mux/rdmacm-mux.h"
> > +#include "rdma_utils.h"
> > 
> >  typedef struct RdmaDeviceResources RdmaDeviceResources;
> > 
> > @@ -30,11 +31,6 @@ typedef struct RdmaBackendThread {
> >      bool is_running; /* Set by the thread to report its status */
> >  } RdmaBackendThread;
> > 
> > -typedef struct RecvMadList {
> > -    QemuMutex lock;
> > -    QList *list;
> > -} RecvMadList;
> > -
> >  typedef struct RdmaCmMux {
> >      CharBackend *chr_be;
> >      int can_receive;
> > @@ -48,7 +44,7 @@ typedef struct RdmaBackendDev {
> >      struct ibv_context *context;
> >      struct ibv_comp_channel *channel;
> >      uint8_t port_num;
> > -    RecvMadList recv_mads_list;
> > +    LockedList recv_mads_list;
> >      RdmaCmMux rdmacm_mux;
> >  } RdmaBackendDev;
> > 
> > diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c
> > index f1c980c6be..a2a4ea2a15 100644
> > --- a/hw/rdma/rdma_utils.c
> > +++ b/hw/rdma/rdma_utils.c
> > @@ -14,6 +14,8 @@
> >   */
> > 
> >  #include "qemu/osdep.h"
> > +#include "qapi/qmp/qlist.h"
> > +#include "qapi/qmp/qnum.h"
> >  #include "trace.h"
> >  #include "rdma_utils.h"
> > 
> > @@ -55,3 +57,40 @@ void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
> >          pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0);
> >      }
> >  }
> > +
> > +void rdma_locked_list_init(LockedList *list)
> > +{
> > +    qemu_mutex_init(&list->lock);
> > +    list->list = qlist_new();
> > +}
> > +
> > +void rdma_locked_list_destroy(LockedList *list)
> > +{
> > +    if (list->list) {
> > +        qlist_destroy_obj(QOBJECT(list->list));
> > +        qemu_mutex_destroy(&list->lock);
> > +        list->list = NULL;
> > +    }
> > +}
> > +
> > +void rdma_locked_list_append_int64(LockedList *list, int64_t value)
> > +{
> > +    qemu_mutex_lock(&list->lock);
> > +    qlist_append_int(list->list, value);
> > +    qemu_mutex_unlock(&list->lock);
> > +}
> > +
> > +int64_t rdma_locked_list_pop_int64(LockedList *list)
> > +{
> > +    QObject *obj;
> > +
> > +    qemu_mutex_lock(&list->lock);
> > +    obj = qlist_pop(list->list);
> > +    qemu_mutex_unlock(&list->lock);
> > +
> > +    if (!obj) {
> > +        return -ENOENT;
> > +    }
> > +
> > +    return qnum_get_uint(qobject_to(QNum, obj));
> > +}
> > diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h
> > index acd148837f..4ba9956f81 100644
> > --- a/hw/rdma/rdma_utils.h
> > +++ b/hw/rdma/rdma_utils.h
> > @@ -29,8 +29,17 @@
> >  #define rdma_info_report(fmt, ...) \
> >      info_report("%s: " fmt, "rdma", ## __VA_ARGS__)
> > 
> > +typedef struct LockedList {
> > +    QemuMutex lock;
> > +    QList *list;
> > +} LockedList;
>
> I am still not sure about the naming: the list is not "locked", it is
> "synchronized"; and it is also not a 'generic' list, since it is located in
> the rdma code.
>
> Maybe RdmaSyncList?
>
> Thanks,
> Marcel
Sure, will rename it (to RdmaProtectedQList).
Will do the same in patch #6.
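
If it helps, v2 will look roughly like this (the type name is the one
suggested above; the function names are tentative and may still change):

typedef struct RdmaProtectedQList {
    QemuMutex lock;
    QList *list;
} RdmaProtectedQList;

void rdma_protected_qlist_init(RdmaProtectedQList *list);
void rdma_protected_qlist_destroy(RdmaProtectedQList *list);
void rdma_protected_qlist_append_int64(RdmaProtectedQList *list,
                                       int64_t value);
int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list);

Callers will stay the same as in this patch, e.g.:

rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);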
>
> > +
> >  void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen);
> >  void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len);
> > +void rdma_locked_list_init(LockedList *list);
> > +void rdma_locked_list_destroy(LockedList *list);
> > +void rdma_locked_list_append_int64(LockedList *list, int64_t value);
> > +int64_t rdma_locked_list_pop_int64(LockedList *list);
> > 
> >  static inline void addrconf_addr_eui48(uint8_t *eui, const char *addr)
> >  {
>
>